"""The 'sky' command line tool.

Example usage:

  # See available commands.
  >> sky

  # Run a task, described in a yaml file.
  # Provisioning, setup, file syncing are handled.
  >> sky launch task.yaml
  >> sky launch [-c cluster_name] task.yaml

  # Show the list of running clusters.
  >> sky status

  # Tear down a specific cluster.
  >> sky down cluster_name

  # Tear down all existing clusters.
  >> sky down -a

NOTE: the order of command definitions in this file corresponds to how they are
listed in "sky --help".  Take care to put logically connected commands close to
each other.
"""
import collections
import concurrent.futures
import fnmatch
import os
import pathlib
import shlex
import shutil
import subprocess
import sys
import time
import traceback
import typing
from typing import (Any, Callable, Dict, Generator, List, Optional, Set, Tuple,
                    TypeVar, Union)

import click
import colorama
import requests as requests_lib
from rich import progress as rich_progress
import yaml

import sky
from sky import backends
from sky import catalog
from sky import clouds
from sky import dag as dag_lib
from sky import exceptions
from sky import jobs as managed_jobs
from sky import models
from sky import resources as resources_lib
from sky import serve as serve_lib
from sky import sky_logging
from sky import skypilot_config
from sky import task as task_lib
from sky.adaptors import common as adaptors_common
from sky.client import sdk
from sky.client.cli import flags
from sky.client.cli import table_utils
from sky.client.cli import utils as cli_utils
from sky.jobs.state import ManagedJobStatus
from sky.provision.kubernetes import constants as kubernetes_constants
from sky.provision.kubernetes import utils as kubernetes_utils
from sky.schemas.api import responses
from sky.server import common as server_common
from sky.server import constants as server_constants
from sky.server.requests import requests
from sky.skylet import autostop_lib
from sky.skylet import constants
from sky.skylet import job_lib
from sky.usage import usage_lib
from sky.utils import annotations
from sky.utils import cluster_utils
from sky.utils import common
from sky.utils import common_utils
from sky.utils import controller_utils
from sky.utils import dag_utils
from sky.utils import directory_utils
from sky.utils import env_options
from sky.utils import infra_utils
from sky.utils import log_utils
from sky.utils import registry
from sky.utils import resources_utils
from sky.utils import rich_utils
from sky.utils import status_lib
from sky.utils import subprocess_utils
from sky.utils import timeline
from sky.utils import ux_utils
from sky.utils import volume as volume_utils
from sky.utils import yaml_utils
from sky.utils.cli_utils import status_utils
from sky.volumes.client import sdk as volumes_sdk

if typing.TYPE_CHECKING:
    import types

    import prettytable

# Lazily import pandas: it is only needed by a few subcommands and importing
# it eagerly would slow down every CLI invocation.
pd = adaptors_common.LazyImport('pandas')
logger = sky_logging.init_logger(__name__)

# Allow `-h` as an alias for `--help` on every command in the group.
_CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

# Shared help text for the `-c`/`--cluster` flag used by several commands.
_CLUSTER_FLAG_HELP = """\
A cluster name. If provided, either reuse an existing cluster with that name or
provision a new cluster with that name. Otherwise provision a new cluster with
an autogenerated name."""

# The maximum number of in-progress managed jobs to show in the status
# command.
_NUM_MANAGED_JOBS_TO_SHOW_IN_STATUS = 5
# Default page sizes for managed-job and API-request listings.
_NUM_MANAGED_JOBS_TO_SHOW = 50
_NUM_REQUESTS_TO_SHOW = 50
# Columns displayed for API requests; verbose mode adds the cluster name.
_DEFAULT_REQUEST_FIELDS_TO_SHOW = [
    'request_id', 'name', 'user_id', 'status', 'created_at'
]
_VERBOSE_REQUEST_FIELDS_TO_SHOW = _DEFAULT_REQUEST_FIELDS_TO_SHOW + [
    'cluster_name'
]
# Fields fetched for managed jobs; the verbose set adds scheduling/debugging
# details such as the failure reason and the underlying cluster resources.
_DEFAULT_MANAGED_JOB_FIELDS_TO_GET = [
    'job_id', 'task_id', 'workspace', 'job_name', 'task_name', 'resources',
    'submitted_at', 'end_at', 'job_duration', 'recovery_count', 'status', 'pool'
]
_VERBOSE_MANAGED_JOB_FIELDS_TO_GET = _DEFAULT_MANAGED_JOB_FIELDS_TO_GET + [
    'current_cluster_name', 'job_id_on_pool_cluster', 'start_at', 'infra',
    'cloud', 'region', 'zone', 'cluster_resources', 'schedule_state', 'details',
    'failure_reason', 'metadata'
]
_USER_NAME_FIELD = ['user_name']
_USER_HASH_FIELD = ['user_hash']

# Error template used when a per-cluster property (e.g. --endpoint) is
# requested but the query matched zero or multiple clusters.
_STATUS_PROPERTY_CLUSTER_NUM_ERROR_MESSAGE = (
    '{cluster_num} cluster{plural} {verb}. Please specify {cause} '
    'cluster to show its {property}.\nUsage: `sky status --{flag} <cluster>`')

_DAG_NOT_SUPPORTED_MESSAGE = ('YAML specifies a DAG which is only supported by '
                              '`sky jobs launch`. `{command}` supports a '
                              'single task only.')

T = TypeVar('T')


def _get_cluster_records_and_set_ssh_config(
    clusters: Optional[List[str]],
    refresh: common.StatusRefreshMode = common.StatusRefreshMode.NONE,
    all_users: bool = False,
    verbose: bool = False,
) -> List[responses.StatusResponse]:
    """Returns a list of clusters that match the glob pattern.

    Side effect: the local SSH config is kept in sync with the returned
    records -- entries are (re)written for clusters that have cached external
    IPs and credentials, and removed for clusters that are unreachable or no
    longer exist.

    Args:
        clusters: A list of cluster names to query. If None, query all clusters.
        refresh: The refresh mode for the status command.
        all_users: Whether to query clusters from all users.
            If clusters is not None, this field is ignored because cluster list
            can include other users' clusters.
        verbose: If True, request the full (non-summary) status payload.

    Returns:
        The status records returned by the API server for the matched
        clusters.
    """
    # TODO(zhwu): we should move this function into SDK.
    # TODO(zhwu): this additional RTT makes CLIs slow. We should optimize this.
    if clusters is not None:
        all_users = True
    request_id = sdk.status(clusters,
                            refresh=refresh,
                            all_users=all_users,
                            _include_credentials=True,
                            _summary_response=not verbose)
    cluster_records = sdk.stream_and_get(request_id)
    # Update the SSH config for all clusters
    for record in cluster_records:
        handle = record['handle']
        name = record['name']
        if not (handle is not None and handle.cached_external_ips is not None
                and 'credentials' in record):
            # If the cluster is not UP or does not have credentials available,
            # we need to remove the cluster from the SSH config.
            cluster_utils.SSHConfigHelper.remove_cluster(name)
            continue
        if not record['credentials']:
            # The credential is missing for some reason, continue.
            logger.debug(
                f'Client did not receive SSH credential for cluster {name}')
            continue

        # During the failover, even though a cluster does not exist, the handle
        # can still exist in the record, and we check for credentials to avoid
        # updating the SSH config for non-existent clusters.
        credentials = record['credentials']
        ips = handle.cached_external_ips
        if isinstance(handle.launched_resources.cloud, clouds.Kubernetes):
            # Replace the proxy command to proxy through the SkyPilot API
            # server with websocket.
            escaped_key_path = shlex.quote(
                (cluster_utils.SSHConfigHelper.generate_local_key_file(
                    handle.cluster_name, credentials)))
            escaped_executable_path = shlex.quote(sys.executable)
            escaped_websocket_proxy_path = shlex.quote(
                f'{directory_utils.get_sky_dir()}/templates/websocket_proxy.py')
            # Instead of directly use websocket_proxy.py, we add an
            # additional proxy, so that ssh can use the head pod in the
            # cluster to jump to worker pods.
            proxy_command = (
                f'ssh -tt -i {escaped_key_path} '
                '-o StrictHostKeyChecking=no '
                '-o UserKnownHostsFile=/dev/null '
                '-o IdentitiesOnly=yes '
                '-W \'[%h]:%p\' '
                f'{handle.ssh_user}@127.0.0.1 '
                '-o ProxyCommand='
                # TODO(zhwu): write the template to a temp file, don't use
                # the one in skypilot repo, to avoid changing the file when
                # updating skypilot.
                f'\"{escaped_executable_path} '
                f'{escaped_websocket_proxy_path} '
                f'{server_common.get_server_url()} '
                f'{handle.cluster_name}\"')
            credentials['ssh_proxy_command'] = proxy_command
        elif isinstance(handle.launched_resources.cloud, clouds.Slurm):
            # TODO(kevin): This is a temporary workaround, ideally we want to
            # get a shell through srun --pty bash on the existing sbatch job.

            # Proxy through the controller/login node to reach the worker node.
            if (handle.cached_internal_ips is None or
                    not handle.cached_internal_ips):
                logger.debug(
                    f'Cluster {name} does not have cached internal IPs. '
                    'Skipping SSH config update.')
                cluster_utils.SSHConfigHelper.remove_cluster(name)
                continue

            escaped_key_path = shlex.quote(
                cluster_utils.SSHConfigHelper.generate_local_key_file(
                    handle.cluster_name, credentials))
            # The first external IP is the controller/login node used as the
            # SSH jump host.
            controller_host = handle.cached_external_ips[0]

            # Build jump proxy: ssh to worker via controller/login node
            proxy_command = (f'ssh -tt -i {escaped_key_path} '
                             '-o StrictHostKeyChecking=no '
                             '-o UserKnownHostsFile=/dev/null '
                             '-o IdentitiesOnly=yes '
                             '-W %h:%p '
                             f'{handle.ssh_user}@{controller_host}')
            # Preserve any pre-existing proxy command by chaining it behind
            # the newly built jump proxy.
            original_proxy = credentials.get('ssh_proxy_command')
            if original_proxy:
                proxy_command += (
                    f' -o ProxyCommand={shlex.quote(original_proxy)}')

            credentials['ssh_proxy_command'] = proxy_command

            # For Slurm, use the worker's internal IP as the SSH target
            ips = handle.cached_internal_ips

        cluster_utils.SSHConfigHelper.add_cluster(
            handle.cluster_name,
            ips,
            credentials,
            handle.cached_external_ssh_ports,
            handle.docker_user,
            handle.ssh_user,
        )

    # Clean up SSH configs for clusters that do not exist.
    #
    # We do this in a conservative way: only when a query is made for all users
    # or specific clusters. Without those, the table returned only contains the
    # current user's clusters, and the information is not enough for
    # removing clusters, because SkyPilot has no idea whether to remove
    # ssh config of a cluster from another user.
    clusters_exists = set(record['name'] for record in cluster_records)
    clusters_to_remove: Set[str] = set()
    if clusters is not None:
        clusters_to_remove = set(clusters) - clusters_exists
    elif all_users:
        clusters_to_remove = set(cluster_utils.SSHConfigHelper.
                                 list_cluster_names()) - clusters_exists

    for cluster_name in clusters_to_remove:
        cluster_utils.SSHConfigHelper.remove_cluster(cluster_name)

    return cluster_records


def _get_glob_matches(candidate_names: List[str],
                      glob_patterns: List[str],
                      resource_type: str = 'Storage') -> List[str]:
    """Returns a list of names that match the glob pattern."""
    glob_storages = []
    for glob_pattern in glob_patterns:
        glob_storage = fnmatch.filter(candidate_names, glob_pattern)
        if not glob_storage:
            click.echo(f'{resource_type} {glob_pattern} not found.')
        glob_storages.extend(glob_storage)
    return list(set(glob_storages))


def _async_call_or_wait(request_id: server_common.RequestId[T],
                        async_call: bool, request_name: str) -> Any:
    """Waits for an API request to finish, or returns immediately if async.

    Args:
        request_id: The API server request to wait on.
        async_call: If True, do not wait; print how to track/cancel the
            request instead.
        request_name: Human-readable request name for the submission message.

    Returns:
        The request's result when async_call is False; None otherwise.
    """
    # Only the first 8 characters are needed to identify the request in
    # follow-up `sky api ...` commands.
    short_request_id = request_id[:8]
    if not async_call:
        try:
            return sdk.stream_and_get(request_id)
        except KeyboardInterrupt:
            # Ctrl-C only detaches the client; the request keeps running
            # server-side, so show how to re-attach, view, or cancel it.
            logger.info(
                ux_utils.starting_message('Request will continue running '
                                          'asynchronously.') +
                f'\n{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}View logs: '
                f'{ux_utils.BOLD}sky api logs {short_request_id}'
                f'{colorama.Style.RESET_ALL}'
                f'\n{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Or, '
                'visit: '
                f'{server_common.get_server_url()}/api/stream?'
                f'request_id={short_request_id}'
                f'\n{ux_utils.INDENT_LAST_SYMBOL}{colorama.Style.DIM}To cancel '
                'the request, run: '
                f'{ux_utils.BOLD}sky api cancel {short_request_id}'
                f'{colorama.Style.RESET_ALL}'
                f'\n{colorama.Style.RESET_ALL}')
            raise
    else:
        click.secho(f'Submitted {request_name} request: {request_id}',
                    fg='green')
        click.echo(
            f'{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Check logs with: '
            f'{ux_utils.BOLD}sky api logs {short_request_id}'
            f'{colorama.Style.RESET_ALL}\n'
            f'{ux_utils.INDENT_SYMBOL}{colorama.Style.DIM}Or, visit: '
            f'{server_common.get_server_url()}/api/stream?'
            f'request_id={short_request_id}'
            f'\n{ux_utils.INDENT_LAST_SYMBOL}{colorama.Style.DIM}To cancel '
            'the request, run: '
            f'{ux_utils.BOLD}sky api cancel {short_request_id}'
            f'{colorama.Style.RESET_ALL}\n')


def _merge_env_vars(env_dict: Optional[Dict[str, str]],
                    env_list: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
    """Merges all values from env_list into env_dict."""
    if not env_dict:
        return env_list
    for (key, value) in env_list:
        env_dict[key] = value
    return list(env_dict.items())


def _complete_cluster_name(ctx: click.Context, param: click.Parameter,
                           incomplete: str) -> List[str]:
    """Shell completion callback that suggests cluster names."""
    del ctx, param  # Unused.
    # TODO(zhwu): we send requests to API server for completion, which can cause
    # large latency. We should investigate caching mechanism if needed.
    url = f'/api/completion/cluster_name?incomplete={incomplete}'
    response = server_common.make_authenticated_request('GET',
                                                        url,
                                                        retry=False,
                                                        timeout=2.0)
    response.raise_for_status()
    return response.json()


def _complete_storage_name(ctx: click.Context, param: click.Parameter,
                           incomplete: str) -> List[str]:
    """Shell completion callback that suggests storage names."""
    del ctx, param  # Unused.
    url = f'/api/completion/storage_name?incomplete={incomplete}'
    response = server_common.make_authenticated_request('GET',
                                                        url,
                                                        retry=False,
                                                        timeout=2.0)
    response.raise_for_status()
    return response.json()


def _complete_volume_name(ctx: click.Context, param: click.Parameter,
                          incomplete: str) -> List[str]:
    """Shell completion callback that suggests volume names."""
    del ctx, param  # Unused.
    url = f'/api/completion/volume_name?incomplete={incomplete}'
    response = server_common.make_authenticated_request('GET',
                                                        url,
                                                        retry=False,
                                                        timeout=2.0)
    response.raise_for_status()
    return response.json()


def _complete_api_request(ctx: click.Context, param: click.Parameter,
                          incomplete: str) -> List[str]:
    """Shell completion callback that suggests API request IDs."""
    del ctx, param  # Unused.
    url = f'/api/completion/api_request?incomplete={incomplete}'
    response = server_common.make_authenticated_request('GET',
                                                        url,
                                                        retry=False,
                                                        timeout=2.0)
    try:
        response.raise_for_status()
    except requests_lib.exceptions.HTTPError:
        # Server may be outdated/missing this API. Silently skip.
        return []
    return response.json()


def _complete_file_name(ctx: click.Context, param: click.Parameter,
                        incomplete: str) -> List[str]:
    """Handle shell completion for file names.

    Returning a single CompletionItem of type 'file' tells click to fall
    back to the shell's native file completion.
    """
    del ctx, param  # Unused.
    file_item = click.shell_completion.CompletionItem(incomplete, type='file')
    return [file_item]


def _get_click_major_version():
    """Returns the installed click library's major version as an int."""
    major, _, _ = click.__version__.partition('.')
    return int(major)


def _get_shell_complete_args(complete_fn):
    """Returns kwargs enabling shell completion where click supports it."""
    # The shell_complete argument is only valid on click >= 8.0.
    if _get_click_major_version() < 8:
        return {}
    return dict(shell_complete=complete_fn)


# Commands shown to users for reloading their shell rc file after
# (un)installing shell completion.
_RELOAD_ZSH_CMD = 'source ~/.zshrc'
_RELOAD_BASH_CMD = 'source ~/.bashrc'


def _install_shell_completion(ctx: click.Context, param: click.Parameter,
                              value: str):
    """A callback for installing shell completion for click.

    Supports bash (>= 4), zsh, fish, or 'auto' to detect the shell from
    $SHELL. Always exits the CLI via ctx.exit() once handled.
    """
    del param  # Unused.
    # Skip when the flag was not given or during click's completion parsing.
    if not value or ctx.resilient_parsing:
        return

    if value == 'auto':
        if 'SHELL' not in os.environ:
            click.secho(
                'Cannot auto-detect shell. Please specify shell explicitly.',
                fg='red')
            ctx.exit()
        else:
            value = os.path.basename(os.environ['SHELL'])

    # Lines appended to the rc files; the 'SkyPilot' marker is grepped below
    # to keep the installation idempotent.
    zshrc_diff = '\n# For SkyPilot shell completion\n. ~/.sky/.sky-complete.zsh'
    bashrc_diff = ('\n# For SkyPilot shell completion'
                   '\n. ~/.sky/.sky-complete.bash')

    cmd: Optional[str] = None
    reload_cmd: Optional[str] = None

    if value == 'bash':
        install_cmd = f'_SKY_COMPLETE=bash_source sky > \
                ~/.sky/.sky-complete.bash && \
                echo "{bashrc_diff}" >> ~/.bashrc'

        # click's bash completion requires bash >= 4.
        cmd = (f'(grep -q "SkyPilot" ~/.bashrc) || '
               f'([[ ${{BASH_VERSINFO[0]}} -ge 4 ]] && ({install_cmd}) || '
               f'(echo "Bash must be version 4 or above." && exit 1))')

        reload_cmd = _RELOAD_BASH_CMD

    elif value == 'fish':
        cmd = '_SKY_COMPLETE=fish_source sky > \
                ~/.config/fish/completions/sky.fish'

        # Fish does not need to be reloaded and will automatically pick up
        # completions.
        reload_cmd = None

    elif value == 'zsh':
        install_cmd = f'_SKY_COMPLETE=zsh_source sky > \
                ~/.sky/.sky-complete.zsh && \
                echo "{zshrc_diff}" >> ~/.zshrc'

        cmd = f'(grep -q "SkyPilot" ~/.zshrc) || ({install_cmd})'
        reload_cmd = _RELOAD_ZSH_CMD

    else:
        click.secho(f'Unsupported shell: {value}', fg='red')
        ctx.exit()

    assert cmd is not None  # This should never be None due to ctx.exit() above
    try:
        # Run via bash explicitly: the bash branch uses [[ ]] and
        # BASH_VERSINFO, which are not POSIX sh features.
        subprocess.run(cmd,
                       shell=True,
                       check=True,
                       executable=shutil.which('bash'))
        click.secho(f'Shell completion installed for {value}', fg='green')
        if reload_cmd is not None:
            click.echo(
                'Completion will take effect once you restart the terminal: ' +
                click.style(f'{reload_cmd}', bold=True))
    except subprocess.CalledProcessError as e:
        click.secho(f'> Installation failed with code {e.returncode}', fg='red')
    ctx.exit()


def _uninstall_shell_completion(ctx: click.Context, param: click.Parameter,
                                value: str):
    """A callback for uninstalling shell completion for click.

    Reverses the edits made by _install_shell_completion: removes the
    marker lines from the rc file and deletes the generated completion
    script. Always exits the CLI via ctx.exit() once handled.
    """
    del param  # Unused.
    # Skip when the flag was not given or during click's completion parsing.
    if not value or ctx.resilient_parsing:
        return

    if value == 'auto':
        if 'SHELL' not in os.environ:
            click.secho(
                'Cannot auto-detect shell. Please specify shell explicitly.',
                fg='red')
            ctx.exit()
        else:
            value = os.path.basename(os.environ['SHELL'])

    cmd: Optional[str] = None
    reload_cmd: Optional[str] = None

    if value == 'bash':
        cmd = 'sed -i"" -e "/# For SkyPilot shell completion/d" ~/.bashrc && \
               sed -i"" -e "/sky-complete.bash/d" ~/.bashrc && \
               rm -f ~/.sky/.sky-complete.bash'

        reload_cmd = _RELOAD_BASH_CMD

    elif value == 'fish':
        cmd = 'rm -f ~/.config/fish/completions/sky.fish'
        # Fish does not need to be reloaded and will automatically pick up
        # completions.
        reload_cmd = None

    elif value == 'zsh':
        cmd = 'sed -i"" -e "/# For SkyPilot shell completion/d" ~/.zshrc && \
               sed -i"" -e "/sky-complete.zsh/d" ~/.zshrc && \
               rm -f ~/.sky/.sky-complete.zsh'

        reload_cmd = _RELOAD_ZSH_CMD

    else:
        click.secho(f'Unsupported shell: {value}', fg='red')
        ctx.exit()

    assert cmd is not None  # This should never be None due to ctx.exit() above
    try:
        subprocess.run(cmd, shell=True, check=True)
        click.secho(f'Shell completion uninstalled for {value}', fg='green')
        if reload_cmd is not None:
            click.echo(
                'Changes will take effect once you restart the terminal: ' +
                click.style(f'{reload_cmd}', bold=True))
    except subprocess.CalledProcessError as e:
        click.secho(f'> Uninstallation failed with code {e.returncode}',
                    fg='red')
    ctx.exit()


def _add_click_options(options: List[click.Option]):
    """A decorator for adding a list of click option decorators.

    Applies the decorators in reverse so that the options render in the
    same order as given in `options`.
    """

    def _apply(func):
        for option in options[::-1]:
            func = option(func)
        return func

    return _apply


def _parse_override_params(
    cloud: Optional[str] = None,
    region: Optional[str] = None,
    zone: Optional[str] = None,
    gpus: Optional[str] = None,
    cpus: Optional[str] = None,
    memory: Optional[str] = None,
    instance_type: Optional[str] = None,
    use_spot: Optional[bool] = None,
    image_id: Optional[str] = None,
    disk_size: Optional[int] = None,
    disk_tier: Optional[str] = None,
    network_tier: Optional[str] = None,
    ports: Optional[Tuple[str, ...]] = None,
    config_override: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Parses the override parameters into a dictionary."""
    override_params: Dict[str, Any] = {}
    if cloud is not None:
        if cloud.lower() == 'none' or cloud == '*':
            override_params['cloud'] = None
        else:
            override_params['cloud'] = registry.CLOUD_REGISTRY.from_str(cloud)
    if region is not None:
        if region.lower() == 'none' or region == '*':
            override_params['region'] = None
        else:
            override_params['region'] = region
    if zone is not None:
        if zone.lower() == 'none' or zone == '*':
            override_params['zone'] = None
        else:
            override_params['zone'] = zone
    if gpus is not None:
        if gpus.lower() == 'none':
            override_params['accelerators'] = None
        else:
            override_params['accelerators'] = gpus
    if cpus is not None:
        if cpus.lower() == 'none':
            override_params['cpus'] = None
        else:
            override_params['cpus'] = cpus
    if memory is not None:
        if memory.lower() == 'none':
            override_params['memory'] = None
        else:
            override_params['memory'] = memory
    if instance_type is not None:
        if instance_type.lower() == 'none':
            override_params['instance_type'] = None
        else:
            override_params['instance_type'] = instance_type
    if use_spot is not None:
        override_params['use_spot'] = use_spot
    if image_id is not None:
        if image_id.lower() == 'none':
            override_params['image_id'] = None
        else:
            override_params['image_id'] = image_id
    if disk_size is not None:
        override_params['disk_size'] = disk_size
    if disk_tier is not None:
        if disk_tier.lower() == 'none':
            override_params['disk_tier'] = None
        else:
            override_params['disk_tier'] = disk_tier
    if network_tier is not None:
        if network_tier.lower() == 'none':
            override_params['network_tier'] = None
        else:
            override_params['network_tier'] = network_tier
    if ports:
        if any(p.lower() == 'none' for p in ports):
            if len(ports) > 1:
                with ux_utils.print_exception_no_traceback():
                    raise ValueError('Cannot specify both "none" and other '
                                     'ports.')
            override_params['ports'] = None
        else:
            override_params['ports'] = ports
    if config_override:
        override_params['_cluster_config_overrides'] = config_override
    return override_params


def _check_yaml_only(
        entrypoint: str) -> Tuple[bool, Optional[Dict[str, Any]], bool, str]:
    """Checks if entrypoint is a readable YAML file without confirmation.

    Args:
        entrypoint: Path to a YAML file.

    Returns:
        A tuple (is_yaml, config, yaml_file_provided, invalid_reason):
        - is_yaml: Whether the entrypoint parsed as YAML (and the first
          document is not a bare string, e.g. a shell command).
        - config: The first YAML document, {} if the file is empty, or None
          if the file could not be opened/parsed.
        - yaml_file_provided: Whether the entrypoint syntactically looks
          like a single *.yaml/*.yml path.
        - invalid_reason: Human-readable reason when a yaml-looking path
          could not be used; '' otherwise.
    """
    is_yaml = True
    config: Optional[List[Dict[str, Any]]] = None
    result = None
    # NOTE: endswith('yaml') (no leading dot) also matches names like
    # 'taskyaml'; only a single whitespace-separated token is treated as a
    # candidate yaml path.
    shell_splits = shlex.split(entrypoint)
    yaml_file_provided = (len(shell_splits) == 1 and
                          (shell_splits[0].endswith('yaml') or
                           shell_splits[0].endswith('.yml')))
    invalid_reason = ''
    try:
        with open(entrypoint, 'r', encoding='utf-8') as f:
            try:
                config = list(yaml_utils.safe_load_all(f))
                if config:
                    # FIXME(zongheng): in a chain DAG YAML it only returns the
                    # first section. OK for downstream but is weird.
                    result = config[0]
                else:
                    result = {}
                if isinstance(result, str):
                    # 'sky exec cluster ./my_script.sh'
                    is_yaml = False
            except yaml.YAMLError as e:
                # Only surface the YAML parse error if the user clearly meant
                # to pass a yaml file; otherwise it is likely a command.
                if yaml_file_provided:
                    logger.debug(e)
                    detailed_error = f'\nYAML Error: {e}\n'
                    invalid_reason = ('contains an invalid configuration. '
                                      'Please check syntax.\n'
                                      f'{detailed_error}')
                is_yaml = False

    except OSError:
        # The entrypoint is not an openable file; diagnose why if it looked
        # like a yaml path.
        if yaml_file_provided:
            entry_point_path = os.path.expanduser(entrypoint)
            if not os.path.exists(entry_point_path):
                invalid_reason = ('does not exist. Please check if the path'
                                  ' is correct.')
            elif not os.path.isfile(entry_point_path):
                invalid_reason = ('is not a file. Please check if the path'
                                  ' is correct.')
            else:
                invalid_reason = ('yaml.safe_load() failed. Please check if the'
                                  ' path is correct.')
        is_yaml = False
    return is_yaml, result, yaml_file_provided, invalid_reason


def _check_yaml(entrypoint: str) -> Tuple[bool, Optional[Dict[str, Any]]]:
    """Checks if entrypoint is a readable YAML file.

    If the entrypoint looks like a yaml path but cannot be used as one, the
    user must confirm before it is treated as a remote command.

    Args:
        entrypoint: Path to a YAML file.
    """
    is_yaml, result, yaml_file_provided, invalid_reason = _check_yaml_only(
        entrypoint)
    if not is_yaml and yaml_file_provided:
        click.confirm(
            f'{entrypoint!r} looks like a yaml path but {invalid_reason}\n'
            'It will be treated as a command to be run remotely. Continue?',
            abort=True)
    return is_yaml, result


def _pop_and_ignore_fields_in_override_params(
        params: Dict[str, Any], field_to_ignore: List[str]) -> None:
    """Pops and ignores fields in override params.

    Args:
        params: Override params.
        field_to_ignore: Fields to ignore.

    Returns:
        Override params with fields ignored.
    """
    if field_to_ignore is not None:
        for field in field_to_ignore:
            field_value = params.pop(field, None)
            if field_value is not None:
                click.secho(f'Override param {field}={field_value} is ignored.',
                            fg='yellow')


def _make_task_or_dag_from_entrypoint_with_overrides(
    entrypoint: Tuple[str, ...],
    *,
    name: Optional[str] = None,
    workdir: Optional[str] = None,
    cloud: Optional[str] = None,
    region: Optional[str] = None,
    zone: Optional[str] = None,
    gpus: Optional[str] = None,
    cpus: Optional[str] = None,
    memory: Optional[str] = None,
    instance_type: Optional[str] = None,
    num_nodes: Optional[int] = None,
    use_spot: Optional[bool] = None,
    image_id: Optional[str] = None,
    disk_size: Optional[int] = None,
    disk_tier: Optional[str] = None,
    network_tier: Optional[str] = None,
    ports: Optional[Tuple[str, ...]] = None,
    env: Optional[List[Tuple[str, str]]] = None,
    secret: Optional[List[Tuple[str, str]]] = None,
    field_to_ignore: Optional[List[str]] = None,
    # job launch specific
    job_recovery: Optional[str] = None,
    config_override: Optional[Dict[str, Any]] = None,
    git_url: Optional[str] = None,
    git_ref: Optional[str] = None,
) -> Union['task_lib.Task', 'dag_lib.Dag']:
    """Creates a task or a dag from an entrypoint with overrides.

    The entrypoint is either a path to a (possibly chain-DAG) YAML file or
    a bash command to run remotely. CLI flags override the corresponding
    fields of the loaded task; for multi-task DAGs the resource overrides
    are ignored with a warning.

    Returns:
        A dag iff the entrypoint is YAML and contains more than 1 task.
        Otherwise, a task.

    Raises:
        click.UsageError: If both git_url and workdir are specified.
    """
    if git_url is not None and workdir is not None:
        raise click.UsageError('Cannot specify both --git-url and --workdir')

    # Join the argv words back into a single string: either a yaml path or
    # a shell command.
    entrypoint = ' '.join(entrypoint)
    is_yaml, _ = _check_yaml(entrypoint)
    entrypoint: Optional[str]
    if is_yaml:
        # Treat entrypoint as a yaml.
        click.secho('YAML to run: ', fg='cyan', nl=False)
        click.secho(entrypoint)
    else:
        if not entrypoint:
            entrypoint = None
        else:
            # Treat entrypoint as a bash command.
            click.secho('Command to run: ', fg='cyan', nl=False)
            click.secho(entrypoint)

    override_params = _parse_override_params(cloud=cloud,
                                             region=region,
                                             zone=zone,
                                             gpus=gpus,
                                             cpus=cpus,
                                             memory=memory,
                                             instance_type=instance_type,
                                             use_spot=use_spot,
                                             image_id=image_id,
                                             disk_size=disk_size,
                                             disk_tier=disk_tier,
                                             network_tier=network_tier,
                                             ports=ports,
                                             config_override=config_override)
    if field_to_ignore is not None:
        _pop_and_ignore_fields_in_override_params(override_params,
                                                  field_to_ignore)

    if is_yaml:
        assert entrypoint is not None
        usage_lib.messages.usage.update_user_task_yaml(entrypoint)
        dag = dag_utils.load_chain_dag_from_yaml(entrypoint,
                                                 env_overrides=env,
                                                 secret_overrides=secret)
        if len(dag.tasks) > 1:
            # When the dag has more than 1 task. It is unclear how to
            # override the params for the dag. So we just ignore the
            # override params.
            if override_params:
                click.secho(
                    f'WARNING: override params {override_params} are ignored, '
                    'since the yaml file contains multiple tasks.',
                    fg='yellow')
            return dag
        assert len(dag.tasks) == 1, (
            f'If you see this, please file an issue; tasks: {dag.tasks}')
        task = dag.tasks[0]
    else:
        # A bare command (or empty entrypoint) becomes a single task with
        # default resources.
        task = task_lib.Task(name='sky-cmd', run=entrypoint)
        task.set_resources({resources_lib.Resources()})
        # env update has been done for DAG in load_chain_dag_from_yaml for YAML.
        task.update_envs(env)
        task.update_secrets(secret)

    # Update the workdir config from the command line parameters.
    # And update the envs and secrets from the workdir.
    task.update_workdir(workdir, git_url, git_ref)
    task.update_envs_and_secrets_from_workdir()

    # job launch specific.
    if job_recovery is not None:
        override_params['job_recovery'] = job_recovery

    task.set_resources_override(override_params)

    if num_nodes is not None:
        task.num_nodes = num_nodes
    if name is not None:
        task.name = name
    return task


class _NaturalOrderGroup(click.Group):
    """Lists commands in the order defined in this script.

    Reference: https://github.com/pallets/click/issues/513
    """

    def list_commands(self, ctx):  # pylint: disable=unused-argument
        """Return visible command names, preserving definition order.

        Hidden commands are dropped, and aliases (multiple names registered
        for the same command object) are collapsed to the first name seen.
        """
        visible_names = []
        emitted_command_ids = set()
        for cmd_name, cmd in self.commands.items():
            if getattr(cmd, 'hidden', False):
                continue
            if id(cmd) in emitted_command_ids:
                # Alias: this command object was already listed under
                # another name.
                continue
            emitted_command_ids.add(id(cmd))
            visible_names.append(cmd_name)
        return visible_names

    @usage_lib.entrypoint('sky.cli', fallback=True)
    def invoke(self, ctx):
        """Dispatch to the selected subcommand, with usage reporting."""
        return super().invoke(ctx)


class _DocumentedCodeCommand(click.Command):
    """Corrects help strings for documented commands such that --help displays
    properly and code blocks are rendered in the official web documentation.
    """

    def get_help(self, ctx):
        """Render help with Sphinx code-block directives made CLI-friendly."""
        original_help = ctx.command.help
        # Swap the Sphinx directive for click's '\b' no-rewrap marker so the
        # terminal output keeps the code block's line breaks.
        ctx.command.help = original_help.replace('.. code-block:: bash\n',
                                                 '\b')
        return super().get_help(ctx)


@click.group(cls=_NaturalOrderGroup, context_settings=_CONTEXT_SETTINGS)
# The two shell-completion options are eager and expose_value=False: their
# callbacks run before any subcommand is resolved, and the values are never
# passed to cli() itself.
@click.option('--install-shell-completion',
              type=click.Choice(['bash', 'zsh', 'fish', 'auto']),
              callback=_install_shell_completion,
              expose_value=False,
              is_eager=True,
              help='Install shell completion for the specified shell.')
@click.option('--uninstall-shell-completion',
              type=click.Choice(['bash', 'zsh', 'fish', 'auto']),
              callback=_uninstall_shell_completion,
              expose_value=False,
              is_eager=True,
              help='Uninstall shell completion for the specified shell.')
@click.version_option(sky.__version__, '--version', '-v', prog_name='skypilot')
@click.version_option(sky.__commit__,
                      '--commit',
                      '-c',
                      prog_name='skypilot',
                      message='%(prog)s, commit %(version)s',
                      help='Show the commit hash and exit')
@annotations.client_api
def cli():
    # Intentionally empty: this group only dispatches to the subcommands
    # registered below via `@cli.command(...)`.
    pass


def _handle_infra_cloud_region_zone_options(infra: Optional[str],
                                            cloud: Optional[str],
                                            region: Optional[str],
                                            zone: Optional[str]):
    """Handle the backward compatibility for --infra and --cloud/region/zone.

    Returns:
        cloud, region, zone
    """
    if cloud is not None or region is not None or zone is not None:
        click.secho(
            'The --cloud, --region, and --zone options are deprecated. '
            'Use --infra instead.',
            fg='yellow')
        if infra is not None:
            with ux_utils.print_exception_no_traceback():
                raise ValueError('Cannot specify both --infra and '
                                 '--cloud, --region, or --zone.')

    if infra is not None:
        infra_info = infra_utils.InfraInfo.from_str(infra)
        # Convert None to '*' to ensure proper override behavior
        cloud = infra_info.cloud if infra_info.cloud is not None else '*'
        region = infra_info.region if infra_info.region is not None else '*'
        zone = infra_info.zone if infra_info.zone is not None else '*'
    return cloud, region, zone


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=True)
@click.argument('entrypoint',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
@click.option('--cluster',
              '-c',
              default=None,
              type=str,
              **_get_shell_complete_args(_complete_cluster_name),
              help=_CLUSTER_FLAG_HELP)
@click.option('--dryrun',
              default=False,
              is_flag=True,
              help='If True, do not actually run the job.')
@click.option(
    '--detach-run',
    '-d/-no-d',
    default=False,
    is_flag=True,
    help=('If True, as soon as a job is submitted, return from this call '
          'and do not stream execution logs.'))
@click.option('--docker',
              'backend_name',
              flag_value=backends.LocalDockerBackend.NAME,
              default=False,
              hidden=True,
              help=('(Deprecated) Local docker support is deprecated. '
                    'To run locally, create a local Kubernetes cluster with '
                    '``sky local up``.'))
@_add_click_options(flags.TASK_OPTIONS_WITH_NAME +
                    flags.EXTRA_RESOURCES_OPTIONS + flags.COMMON_OPTIONS)
@click.option(
    '--idle-minutes-to-autostop',
    '-i',
    default=None,
    type=int,
    required=False,
    help=('Automatically stop the cluster after this many minutes '
          'of idleness, i.e., no running or pending jobs in the cluster\'s job '
          'queue. Idleness gets reset depending on the ``--wait-for`` flag. '
          'Setting this flag is equivalent to '
          'running ``sky launch -d ...`` and then ``sky autostop -i <minutes>``'
          '. If not set, the cluster will not be autostopped.'))
@flags.wait_for_option('idle-minutes-to-autostop')
@click.option(
    '--down',
    default=False,
    is_flag=True,
    required=False,
    help=
    ('Autodown the cluster: tear down the cluster after all jobs finish '
     '(successfully or abnormally). If --idle-minutes-to-autostop is also set, '
     'the cluster will be torn down after the specified idle time. '
     'Note that if errors occur during provisioning/data syncing/setting up, '
     'the cluster will not be torn down for debugging purposes.'),
)
@click.option(
    '--retry-until-up',
    '-r',
    default=False,
    is_flag=True,
    required=False,
    help=('Whether to retry provisioning infinitely until the cluster is up, '
          'if we fail to launch the cluster on any possible region/cloud due '
          'to unavailability errors.'),
)
@click.option(
    '--yes',
    '-y',
    is_flag=True,
    default=False,
    required=False,
    # Disabling quote check here, as there seems to be a bug in pylint,
    # which incorrectly recognizes the help string as a docstring.
    # pylint: disable=bad-docstring-quotes
    help='Skip confirmation prompt.')
@click.option('--no-setup',
              is_flag=True,
              default=False,
              required=False,
              help='Skip setup phase when (re-)launching cluster.')
@click.option(
    '--clone-disk-from',
    '--clone',
    default=None,
    type=str,
    **_get_shell_complete_args(_complete_cluster_name),
    help=('[Experimental] Clone disk from an existing cluster to launch '
          'a new one. This is useful when the new cluster needs to have '
          'the same data on the boot disk as an existing cluster.'))
@click.option(
    '--fast',
    is_flag=True,
    default=False,
    required=False,
    help=('[Experimental] If the cluster is already up and available, skip '
          'provisioning and setup steps.'))
@click.option('--git-url', type=str, help='Git repository URL.')
@click.option('--git-ref',
              type=str,
              help='Git reference (branch, tag, or commit hash) to use.')
@usage_lib.entrypoint
def launch(
    entrypoint: Tuple[str, ...],
    cluster: Optional[str],
    dryrun: bool,
    detach_run: bool,
    backend_name: Optional[str],
    name: Optional[str],
    workdir: Optional[str],
    infra: Optional[str],
    cloud: Optional[str],
    region: Optional[str],
    zone: Optional[str],
    gpus: Optional[str],
    cpus: Optional[str],
    memory: Optional[str],
    instance_type: Optional[str],
    num_nodes: Optional[int],
    use_spot: Optional[bool],
    image_id: Optional[str],
    env_file: Optional[Dict[str, str]],
    env: List[Tuple[str, str]],
    secret: List[Tuple[str, str]],
    disk_size: Optional[int],
    disk_tier: Optional[str],
    network_tier: Optional[str],
    ports: Tuple[str, ...],
    idle_minutes_to_autostop: Optional[int],
    wait_for: Optional[str],
    down: bool,  # pylint: disable=redefined-outer-name
    retry_until_up: bool,
    yes: bool,
    no_setup: bool,
    clone_disk_from: Optional[str],
    fast: bool,
    async_call: bool,
    config_override: Optional[Dict[str, Any]] = None,
    git_url: Optional[str] = None,
    git_ref: Optional[str] = None,
):
    """Launch a cluster or task.

    If ENTRYPOINT points to a valid YAML file, it is read in as the task
    specification. Otherwise, it is interpreted as a bash command.

    In both cases, the commands are run under the task's workdir (if specified)
    and they undergo job queue scheduling.
    """
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    # TODO(zhwu): the current --async is a bit inconsistent with the direct
    # sky launch, as `sky api logs` does not contain the logs for the actual job
    # submitted, while the synchronous way of `sky launch` does. We should
    # consider having the job logs available in `sky api logs` as well.
    # Reason for not doing it right now: immediately tailing the logs for the
    # job can take up resources on the API server. When there are a lot of
    # `launch` submitted asynchronously, the log tailing may overwhelm the API
    # server, if the jobs are long running.
    # Combine --env-file entries with individual --env flags.
    env = _merge_env_vars(env_file, env)
    # Launching directly on a managed-jobs/serve controller is disallowed.
    controller_utils.check_cluster_name_not_controller(
        cluster, operation_str='Launching tasks on it')
    # --docker selects the (deprecated) LocalDockerBackend; otherwise fall
    # back to the default VM-based backend.
    if backend_name is None:
        backend_name = backends.CloudVmRayBackend.NAME

    # Translate --infra (or the deprecated --cloud/--region/--zone) flags.
    cloud, region, zone = _handle_infra_cloud_region_zone_options(
        infra, cloud, region, zone)

    # Build a Task from the YAML entrypoint or inline command, applying the
    # CLI resource overrides. Multi-task DAG YAMLs are rejected below.
    task_or_dag = _make_task_or_dag_from_entrypoint_with_overrides(
        entrypoint=entrypoint,
        name=name,
        workdir=workdir,
        cloud=cloud,
        region=region,
        zone=zone,
        gpus=gpus,
        cpus=cpus,
        memory=memory,
        instance_type=instance_type,
        num_nodes=num_nodes,
        use_spot=use_spot,
        image_id=image_id,
        env=env,
        secret=secret,
        disk_size=disk_size,
        disk_tier=disk_tier,
        network_tier=network_tier,
        ports=ports,
        config_override=config_override,
        git_url=git_url,
        git_ref=git_ref,
    )
    if isinstance(task_or_dag, dag_lib.Dag):
        raise click.UsageError(
            _DAG_NOT_SUPPORTED_MESSAGE.format(command='sky launch'))
    task = task_or_dag

    backend: backends.Backend
    if backend_name == backends.LocalDockerBackend.NAME:
        backend = backends.LocalDockerBackend()
        click.secho(
            'WARNING: LocalDockerBackend is deprecated and will be '
            'removed in a future release. To run locally, create a local '
            'Kubernetes cluster with `sky local up`.',
            fg='yellow')
    elif backend_name == backends.CloudVmRayBackend.NAME:
        backend = backends.CloudVmRayBackend()
    else:
        with ux_utils.print_exception_no_traceback():
            raise ValueError(f'{backend_name} backend is not supported.')

    # A service/pool section is not handled by `sky launch`; warn the user and
    # point them at the appropriate CLI, then continue with the launch.
    if task.service is not None:
        noun = 'pool' if task.service.pool else 'service'
        capnoun = noun.capitalize()
        sysname = 'Pool' if task.service.pool else 'SkyServe'
        cmd = 'sky jobs pool apply' if task.service.pool else 'sky serve up'
        logger.info(
            f'{colorama.Fore.YELLOW}{capnoun} section will be ignored when '
            f'using `sky launch`. {colorama.Style.RESET_ALL}\n'
            f'{colorama.Fore.YELLOW}To spin up a {noun}, use {sysname} CLI: '
            f'{colorama.Style.RESET_ALL}{colorama.Style.BRIGHT}{cmd}'
            f'{colorama.Style.RESET_ALL}')

    # Submit the launch request; confirmation is requested unless -y is given.
    request_id = sdk.launch(
        task,
        dryrun=dryrun,
        cluster_name=cluster,
        backend=backend,
        idle_minutes_to_autostop=idle_minutes_to_autostop,
        wait_for=autostop_lib.AutostopWaitFor.from_str(wait_for)
        if wait_for is not None else None,
        down=down,
        retry_until_up=retry_until_up,
        no_setup=no_setup,
        clone_disk_from=clone_disk_from,
        fast=fast,
        _need_confirmation=not yes,
    )
    job_id_handle = _async_call_or_wait(request_id, async_call, 'sky.launch')
    if not async_call:
        job_id, handle = job_id_handle
        if not handle:
            assert dryrun, 'handle should only be None when dryrun is true'
            return
        # Add ssh config for the cluster
        _get_cluster_records_and_set_ssh_config(
            clusters=[handle.get_cluster_name()])
        # job_id will be None if no job was submitted (e.g. no entrypoint
        # provided)
        returncode = 0
        if not detach_run and job_id is not None:
            returncode = sdk.tail_logs(handle.get_cluster_name(),
                                       job_id,
                                       follow=True)
        click.secho(
            ux_utils.command_hint_messages(ux_utils.CommandHintType.CLUSTER_JOB,
                                           job_id, handle.get_cluster_name()))
        # Exit with the tailed job's return code (0 if logs were not tailed).
        sys.exit(returncode)


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=True)
@click.argument('cluster',
                required=False,
                type=str,
                **_get_shell_complete_args(_complete_cluster_name))
@click.option(
    '--cluster',
    '-c',
    'cluster_option',
    hidden=True,
    type=str,
    help='This is the same as the positional argument, just for consistency.',
    **_get_shell_complete_args(_complete_cluster_name))
@click.argument('entrypoint',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
@click.option(
    '--detach-run',
    '-d',
    default=False,
    is_flag=True,
    help=('If True, as soon as a job is submitted, return from this call '
          'and do not stream execution logs.'))
@_add_click_options(flags.TASK_OPTIONS_WITH_NAME +
                    flags.EXTRA_RESOURCES_OPTIONS + flags.COMMON_OPTIONS)
@click.option('--git-url', type=str, help='Git repository URL.')
@click.option('--git-ref',
              type=str,
              help='Git reference (branch, tag, or commit hash) to use.')
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def exec(
    cluster: Optional[str],
    cluster_option: Optional[str],
    entrypoint: Tuple[str, ...],
    detach_run: bool,
    name: Optional[str],
    infra: Optional[str],
    cloud: Optional[str],
    region: Optional[str],
    zone: Optional[str],
    workdir: Optional[str],
    gpus: Optional[str],
    ports: Tuple[str],
    instance_type: Optional[str],
    num_nodes: Optional[int],
    use_spot: Optional[bool],
    image_id: Optional[str],
    env_file: Optional[Dict[str, str]],
    env: List[Tuple[str, str]],
    secret: List[Tuple[str, str]],
    cpus: Optional[str],
    memory: Optional[str],
    disk_size: Optional[int],
    disk_tier: Optional[str],
    network_tier: Optional[str],
    async_call: bool,
    config_override: Optional[Dict[str, Any]] = None,
    git_url: Optional[str] = None,
    git_ref: Optional[str] = None,
):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Execute a task or command on an existing cluster.

    If ENTRYPOINT points to a valid YAML file, it is read in as the task
    specification. Otherwise, it is interpreted as a bash command.

    Actions performed by ``sky exec``:

    1. Workdir syncing, if:

       - ENTRYPOINT is a YAML with the ``workdir`` field specified; or

       - Flag ``--workdir=<local_dir>`` is set.

    2. Executing the specified task's ``run`` commands / the bash command.

    ``sky exec`` is thus typically faster than ``sky launch``, provided a
    cluster already exists.

    All setup steps (provisioning, setup commands, file mounts syncing) are
    skipped.  If any of those specifications changed, this command will not
    reflect those changes.  To ensure a cluster's setup is up to date, use ``sky
    launch`` instead.

    Execution and scheduling behavior:

    - The task/command will undergo job queue scheduling, respecting any
      specified resource requirement. It can be executed on any node of the
      cluster with enough resources.

    - The task/command is run under the workdir (if specified).

    - The task/command is run non-interactively (without a pseudo-terminal or
      pty), so interactive commands such as ``htop`` do not work. Use ``ssh
      my_cluster`` instead.

    Typical workflow:

    .. code-block:: bash

      # First command: set up the cluster once.
      sky launch -c mycluster app.yaml
      \b
      # For iterative development, simply execute the task on the launched
      # cluster.
      sky exec mycluster app.yaml
      \b
      # Do "sky launch" again if anything other than Task.run is modified:
      sky launch -c mycluster app.yaml
      \b
      # Pass in commands for execution.
      sky exec mycluster python train_cpu.py
      sky exec mycluster --gpus=V100:1 python train_gpu.py
      \b
      # Pass environment variables to the task.
      sky exec mycluster --env WANDB_API_KEY python train_gpu.py

    """
    if cluster_option is None and cluster is None:
        raise click.UsageError('Missing argument \'[CLUSTER]\' and '
                               '\'[ENTRYPOINT]...\'')
    if cluster_option is not None:
        # -c/--cluster was given, so the positional CLUSTER argument (if any)
        # was actually the first word of the entrypoint; shift it back onto
        # the entrypoint tuple.
        if cluster is not None:
            entrypoint = (cluster,) + entrypoint
        cluster = cluster_option
    if not entrypoint:
        raise click.UsageError('Missing argument \'[ENTRYPOINT]...\'')
    assert cluster is not None, (cluster, cluster_option, entrypoint)

    # Combine --env-file entries with individual --env flags.
    env = _merge_env_vars(env_file, env)
    # Executing directly on a managed-jobs/serve controller is disallowed.
    controller_utils.check_cluster_name_not_controller(
        cluster, operation_str='Executing task on it')

    # Translate --infra (or the deprecated --cloud/--region/--zone) flags.
    cloud, region, zone = _handle_infra_cloud_region_zone_options(
        infra, cloud, region, zone)

    # Build a Task from the YAML entrypoint or inline command. The
    # provisioning-only overrides listed in field_to_ignore are dropped, since
    # `sky exec` reuses an existing cluster.
    task_or_dag = _make_task_or_dag_from_entrypoint_with_overrides(
        entrypoint=entrypoint,
        name=name,
        workdir=workdir,
        cloud=cloud,
        region=region,
        zone=zone,
        gpus=gpus,
        cpus=cpus,
        memory=memory,
        instance_type=instance_type,
        use_spot=use_spot,
        image_id=image_id,
        num_nodes=num_nodes,
        env=env,
        secret=secret,
        disk_size=disk_size,
        disk_tier=disk_tier,
        network_tier=network_tier,
        ports=ports,
        field_to_ignore=['cpus', 'memory', 'disk_size', 'disk_tier', 'ports'],
        config_override=config_override,
        git_url=git_url,
        git_ref=git_ref,
    )

    if isinstance(task_or_dag, dag_lib.Dag):
        raise click.UsageError('YAML specifies a DAG, while `sky exec` '
                               'supports a single task only.')
    task = task_or_dag

    click.secho('Submitting job to cluster: ', fg='cyan', nl=False)
    click.secho(cluster)
    request_id = sdk.exec(task, cluster_name=cluster)
    # Wait for (or detach from) the submission, then optionally tail logs and
    # propagate the job's exit code.
    job_id_handle = _async_call_or_wait(request_id, async_call, 'sky.exec')
    if not async_call and not detach_run:
        job_id, _ = job_id_handle
        returncode = sdk.tail_logs(cluster, job_id, follow=True)
        sys.exit(returncode)


def _handle_jobs_queue_request(
    request_id: server_common.RequestId[Union[
        List[responses.ManagedJobRecord],
        Tuple[List[responses.ManagedJobRecord], int, Dict[str, int], int]]],
    show_all: bool,
    show_user: bool,
    max_num_jobs_to_show: Optional[int],
    pool_status_request_id: Optional[server_common.RequestId[List[Dict[
        str, Any]]]] = None,
    is_called_by_user: bool = False,
    only_in_progress: bool = False,
    queue_result_version: cli_utils.QueueResultVersion = cli_utils.
    QueueResultVersion.V1,
) -> Tuple[Optional[int], str]:
    """Get the in-progress managed jobs.

    Args:
        request_id: The request ID for managed jobs.
        pool_status_request_id: The request ID for pool status, or None.
        show_all: Show all information of each job (e.g., region, price).
        show_user: Show the user who submitted the job.
        max_num_jobs_to_show: If not None, limit the number of jobs to show to
            this number, which is mainly used by `sky status`
            and `sky jobs queue`.
        is_called_by_user: If this function is called by user directly, or an
            internal call.
        only_in_progress: If True, only return the number of in-progress jobs.
        queue_result_version: The version of the queue result.

    Returns:
        A tuple of (num_in_progress_jobs, msg). If num_in_progress_jobs is None,
        it means there is an error when querying the managed jobs. In this case,
        msg contains the error message. Otherwise, msg contains the formatted
        managed job table.
    """
    # TODO(SKY-980): remove unnecessary fallbacks on the client side.
    num_in_progress_jobs = None
    msg = ''
    status_counts: Optional[Dict[str, int]] = None
    pool_status_result = None
    try:
        if not is_called_by_user:
            usage_lib.messages.usage.set_internal()
        # Call both stream_and_get functions in parallel
        def get_jobs_queue_result():
            return sdk.stream_and_get(request_id)

        def get_pool_status_result():
            # Pool status is optional context for the job table: failures
            # here must not fail the whole queue request.
            if pool_status_request_id is not None:
                try:
                    return sdk.stream_and_get(pool_status_request_id)
                except Exception:  # pylint: disable=broad-except
                    # If getting pool status fails, just continue without it
                    return None
            return None

        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            jobs_future = executor.submit(get_jobs_queue_result)
            pool_status_future = executor.submit(get_pool_status_result)

            result = jobs_future.result()
            pool_status_result = pool_status_future.result()

        # V2 results are a tuple (jobs, total, status_counts, <unused>),
        # while V1 results are a bare list of job records.
        if queue_result_version.v2():
            managed_jobs_, total, status_counts, _ = result
            if only_in_progress:
                # Sum the counts of every non-terminal status.
                num_in_progress_jobs = 0
                if status_counts:
                    for status_value, count in status_counts.items():
                        status_enum = managed_jobs.ManagedJobStatus(
                            status_value)
                        if not status_enum.is_terminal():
                            num_in_progress_jobs += count
            else:
                num_in_progress_jobs = total
        else:
            managed_jobs_ = result
            # Deduplicate by job_id (a job may appear in multiple rows).
            num_in_progress_jobs = len(
                set(job['job_id'] for job in managed_jobs_))
    except exceptions.ClusterNotUpError as e:
        controller_status = e.cluster_status
        msg = str(e)
        if controller_status is None:
            msg += (f' (See: {colorama.Style.BRIGHT}sky jobs -h'
                    f'{colorama.Style.RESET_ALL})')
        elif (controller_status == status_lib.ClusterStatus.STOPPED and
              is_called_by_user):
            msg += (f' (See finished managed jobs: {colorama.Style.BRIGHT}'
                    f'sky jobs queue --refresh{colorama.Style.RESET_ALL})')
    except RuntimeError as e:
        try:
            # Check the controller status again, as the RuntimeError is likely
            # due to the controller being autostopped when querying the jobs.
            # Since we are client-side, we may not know the exact name of the
            # controller, so use the prefix with a wildcard.
            # Query status of the controller cluster.
            records = sdk.get(
                sdk.status(cluster_names=[common.JOB_CONTROLLER_PREFIX + '*'],
                           all_users=True))
            if (not records or
                    records[0]['status'] == status_lib.ClusterStatus.STOPPED):
                controller = controller_utils.Controllers.JOBS_CONTROLLER.value
                msg = controller.default_hint_if_non_existent
        except Exception:  # pylint: disable=broad-except
            # This is a best-effort attempt to find the latest controller
            # status to print a more helpful message, so we can ignore any
            # exception and print the original error.
            pass
        if not msg:
            msg = (
                'Failed to query managed jobs due to connection '
                'issues. Try again later. '
                f'Details: {common_utils.format_exception(e, use_bracket=True)}'
            )
    except Exception as e:  # pylint: disable=broad-except
        msg = ''
        if env_options.Options.SHOW_DEBUG_INFO.get():
            msg += traceback.format_exc()
            msg += '\n'
        msg += ('Failed to query managed jobs: '
                f'{common_utils.format_exception(e, use_bracket=True)}')
    else:
        # Success path: render the job table.
        msg = table_utils.format_job_table(
            managed_jobs_,
            pool_status=pool_status_result,
            show_all=show_all,
            show_user=show_user,
            max_jobs=max_num_jobs_to_show,
            status_counts=status_counts,
        )
    return num_in_progress_jobs, msg


def _handle_services_request(
    request_id: server_common.RequestId[List[Dict[str, Any]]],
    service_names: Optional[List[str]],
    show_all: bool,
    show_endpoint: bool,
    pool: bool = False,  # pylint: disable=redefined-outer-name
    is_called_by_user: bool = False
) -> Tuple[Optional[int], str]:
    """Get service statuses.

    Args:
        service_names: If not None, only show the statuses of these services.
        show_all: Show all information of each service.
        show_endpoint: If True, only show the endpoint of the service.
        pool: If True, the request is for a pool. Otherwise for a service.
        is_called_by_user: If this function is called by user directly, or an
            internal call.

    Returns:
        A tuple of (num_services, msg). If num_services is None, it means there
        is an error when querying the services. In this case, msg contains the
        error message. Otherwise, msg contains the formatted service table.
    """
    # Used in messages below so the same code serves `sky serve` and pools.
    noun = 'pool' if pool else 'service'
    num_services = None
    try:
        if not is_called_by_user:
            usage_lib.messages.usage.set_internal()
        service_records = sdk.get(request_id)
        num_services = len(service_records)
    except exceptions.ClusterNotUpError as e:
        controller_status = e.cluster_status
        msg = str(e)
        if controller_status is None:
            msg += (f' (See: {colorama.Style.BRIGHT}sky serve -h'
                    f'{colorama.Style.RESET_ALL})')
    except RuntimeError as e:
        msg = ''
        try:
            # Check the controller status again, as the RuntimeError is likely
            # due to the controller being autostopped when querying the
            # services.
            # Since we are client-side, we may not know the exact name of the
            # controller, so use the prefix with a wildcard.
            # Query status of the controller cluster.
            records = sdk.get(
                sdk.status(
                    cluster_names=[common.SKY_SERVE_CONTROLLER_PREFIX + '*'],
                    all_users=True))
            if (not records or
                    records[0]['status'] == status_lib.ClusterStatus.STOPPED):
                controller = (
                    controller_utils.Controllers.SKY_SERVE_CONTROLLER.value)
                msg = controller.default_hint_if_non_existent
        except Exception:  # pylint: disable=broad-except
            # This is a best-effort attempt to find the latest controller
            # status to print a more helpful message, so we can ignore any
            # exception and print the original error.
            pass
        if not msg:
            msg = (f'Failed to fetch {noun} statuses due to connection issues. '
                   'Please try again later. Details: '
                   f'{common_utils.format_exception(e, use_bracket=True)}')
    except Exception as e:  # pylint: disable=broad-except
        msg = (f'Failed to fetch {noun} statuses: '
               f'{common_utils.format_exception(e, use_bracket=True)}')
    else:
        if show_endpoint:
            # --endpoint requires exactly one matching service record.
            if len(service_records) != 1:
                plural = 's' if len(service_records) > 1 else ''
                service_num = (str(len(service_records))
                               if service_records else 'No')
                raise click.UsageError(
                    f'{service_num} service{plural} found. Please specify '
                    'an existing service to show its endpoint. Usage: '
                    'sky serve status --endpoint <service-name>')
            endpoint = service_records[0]['endpoint']
            msg = '-' if endpoint is None else endpoint
        else:
            msg = serve_lib.format_service_table(service_records, show_all,
                                                 pool)
            # Append a note for each explicitly-requested name that did not
            # appear in the returned records.
            service_not_found_msg = ''
            if service_names is not None:
                for service_name in service_names:
                    if not any(service_name == record['name']
                               for record in service_records):
                        service_not_found_msg += (
                            f'\n{noun.capitalize()} '
                            f'{service_name!r} not found.')
            if service_not_found_msg:
                msg += f'\n{service_not_found_msg}'
    return num_services, msg


def _show_endpoint(query_clusters: Optional[List[str]],
                   cluster_records: List[responses.StatusResponse], ip: bool,
                   endpoints: bool, endpoint: Optional[int]) -> None:
    """Print the head IP or exposed endpoint(s) of exactly one cluster.

    Args:
        query_clusters: The cluster name patterns the user queried, if any.
            Only used to build error messages.
        cluster_records: Status records matching the query; exactly one record
            is required.
        ip: If True (and no endpoint flags), print the head node's IP address.
        endpoints: If True, print all exposed ports and their endpoints.
        endpoint: If not None, print only the endpoint for this port number.

    Raises:
        ValueError: if the query did not match exactly one cluster, or the
            cluster is not backed by a CloudVmRayResourceHandle.
        RuntimeError: if the matched cluster is not in UP status.
        click.ClickException: if the requested endpoint(s) do not exist.
    """
    show_endpoints = endpoints or endpoint is not None
    show_single_endpoint = endpoint is not None
    if len(cluster_records) != 1:
        with ux_utils.print_exception_no_traceback():
            plural = 's' if len(cluster_records) > 1 else ''
            if cluster_records:
                cluster_num = str(len(cluster_records))
            else:
                cluster_num = (f'{query_clusters[0]!r}'
                               if query_clusters else 'No')
            verb = 'found' if cluster_records else 'not found'
            cause = 'a single'
            if query_clusters and len(query_clusters) > 1:
                cause = 'an existing'
            raise ValueError(
                _STATUS_PROPERTY_CLUSTER_NUM_ERROR_MESSAGE.format(
                    cluster_num=cluster_num,
                    plural=plural,
                    verb=verb,
                    cause=cause,
                    property='IP address' if ip else 'endpoint(s)',
                    flag='ip' if ip else
                    ('endpoint port' if show_single_endpoint else 'endpoints')))

    cluster_record = cluster_records[0]
    if cluster_record['status'] != status_lib.ClusterStatus.UP:
        with ux_utils.print_exception_no_traceback():
            raise RuntimeError(f'Cluster {cluster_record["name"]!r} '
                               'is not in UP status.')
    handle = cluster_record['handle']
    if not isinstance(handle, backends.CloudVmRayResourceHandle):
        with ux_utils.print_exception_no_traceback():
            raise ValueError('Querying IP address is not supported '
                             'for local clusters.')

    head_ip = handle.external_ips()[0]
    # The endpoint request is relatively fast, so we don't add special handling
    # for keyboard interrupt and abort the request to avoid additional latency.
    if show_endpoints:
        # Use `is not None` (matching show_single_endpoint above) rather than
        # truthiness, which would mistreat port 0.
        if endpoint is not None:
            request_id = sdk.endpoints(cluster_record['name'], endpoint)
            cluster_endpoints = sdk.stream_and_get(request_id)
            cluster_endpoint = cluster_endpoints.get(endpoint, None)
            if not cluster_endpoint:
                # NOTE: click.Abort silently drops any message passed to it
                # (click only prints 'Aborted!'), so use ClickException to
                # actually surface the error text to the user.
                raise click.ClickException(
                    f'Endpoint {endpoint} not found for cluster '
                    f'{cluster_record["name"]!r}.')
            click.echo(cluster_endpoint)
        else:
            request_id = sdk.endpoints(cluster_record['name'])
            cluster_endpoints = sdk.stream_and_get(request_id)
            assert isinstance(cluster_endpoints, dict)
            if not cluster_endpoints:
                raise click.ClickException(
                    f'No endpoint found for cluster '
                    f'{cluster_record["name"]!r}.')
            for port, port_endpoint in cluster_endpoints.items():
                click.echo(f'{colorama.Fore.BLUE}{colorama.Style.BRIGHT}{port}'
                           f'{colorama.Style.RESET_ALL}: '
                           f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                           f'{port_endpoint}{colorama.Style.RESET_ALL}')
        return
    click.echo(head_ip)
    return


def _show_enabled_infra(
        active_workspace: str, show_workspace: bool,
        enabled_clouds_request_id: server_common.RequestId[List[str]]):
    """Print the enabled infrastructure for the active workspace."""
    # Block on the previously-submitted enabled-clouds request first, so the
    # header and the infra list are printed together.
    infras = sdk.get(enabled_clouds_request_id)
    workspace_suffix = (f' (workspace: {active_workspace!r})'
                        if show_workspace else '')
    header = (f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}Enabled Infra'
              f'{workspace_suffix}:'
              f'{colorama.Style.RESET_ALL} ')
    click.echo(f'{header}{", ".join(infras)}\n')


@cli.command()
@flags.config_option(expose_value=False)
@flags.verbose_option()
@click.option(
    '--refresh',
    '-r',
    default=False,
    is_flag=True,
    required=False,
    help='Query the latest cluster statuses from the cloud provider(s).')
@click.option('--ip',
              default=False,
              is_flag=True,
              required=False,
              help=('Get the IP address of the head node of a cluster. This '
                    'option will override all other options. For Kubernetes '
                    'clusters, the returned IP address is the internal IP '
                    'of the head pod, and may not be accessible from outside '
                    'the cluster.'))
@click.option('--endpoints',
              default=False,
              is_flag=True,
              required=False,
              help=('Get all exposed endpoints and corresponding URLs for a '
                    'cluster. This option will override all other options.'))
@click.option('--endpoint',
              required=False,
              default=None,
              type=int,
              help=('Get the endpoint URL for the specified port number on the '
                    'cluster. This option will override all other options.'))
@click.option('--show-managed-jobs/--no-show-managed-jobs',
              default=True,
              is_flag=True,
              required=False,
              help='Also show recent in-progress managed jobs, if any.')
@click.option('--show-services/--no-show-services',
              default=True,
              is_flag=True,
              required=False,
              help='Also show sky serve services, if any.')
@click.option('--show-pools/--no-show-pools',
              default=True,
              is_flag=True,
              required=False,
              help='Also show pools, if any.')
@click.argument('clusters',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_cluster_name))
@flags.all_users_option('Show all clusters, including those not owned by the '
                        'current user.')
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def status(verbose: bool, refresh: bool, ip: bool, endpoints: bool,
           endpoint: Optional[int], show_managed_jobs: bool,
           show_services: bool, show_pools: bool, clusters: List[str],
           all_users: bool):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Show clusters.

    If CLUSTERS is given, show those clusters. Otherwise, show all clusters.

    If --ip is specified, show the IP address of the head node of the cluster.
    Only available when CLUSTERS contains exactly one cluster, e.g.
    ``sky status --ip mycluster``.

    If --endpoints is specified, show all exposed endpoints in the cluster.
    Only available when CLUSTERS contains exactly one cluster, e.g.
    ``sky status --endpoints mycluster``. To query a single endpoint, you
    can use ``sky status mycluster --endpoint 8888``.

    Running `sky status` will update the ssh config for the clusters locally, so
    that you can directly ssh into the clusters or connect to the clusters with
    vscode.

    The following fields for each cluster are recorded: cluster name, time
    since last launch, resources, region, zone, hourly price, status, autostop,
    command.

    Display all fields using ``sky status -v``.

    Each cluster can have one of the following statuses:

    - ``INIT``: The cluster may be live or down. It can happen in the following
      cases:

      - Ongoing provisioning or runtime setup. (A ``sky launch`` has started
        but has not completed.)

      - Or, the cluster is in an abnormal state, e.g., some cluster nodes are
        down, or the SkyPilot runtime is unhealthy. (To recover the cluster,
        try ``sky launch`` again on it.)

    - ``UP``: Provisioning and runtime setup have succeeded and the cluster is
      live.  (The most recent ``sky launch`` has completed successfully.)

    - ``STOPPED``: The cluster is stopped and the storage is persisted. Use
      ``sky start`` to restart the cluster.

    Autostop column:

    - Indicates after how many minutes of idleness (no in-progress jobs) the
      cluster will be autostopped. '-' means disabled.

    - If the time is followed by '(down)', e.g., '1m (down)', the cluster will
      be autodowned, rather than autostopped.

    Getting up-to-date cluster statuses:

    - In normal cases where clusters are entirely managed by SkyPilot (i.e., no
      manual operations in cloud consoles) and no autostopping is used, the
      table returned by this command will accurately reflect the cluster
      statuses.

    - In cases where clusters are changed outside of SkyPilot (e.g., manual
      operations in cloud consoles; unmanaged spot clusters getting preempted)
      or for autostop-enabled clusters, use ``--refresh`` to query the latest
      cluster statuses from the cloud providers.
    """
    # Do not show job queue if user specifies clusters, and if user
    # specifies --ip or --endpoint(s).
    show_managed_jobs = show_managed_jobs and not any([clusters, ip, endpoints])
    show_endpoints = endpoints or endpoint is not None
    show_single_endpoint = endpoint is not None
    show_services = show_services and not any([clusters, ip, endpoints])

    query_clusters: Optional[List[str]] = None if not clusters else clusters
    refresh_mode = common.StatusRefreshMode.NONE
    if refresh:
        refresh_mode = common.StatusRefreshMode.FORCE

    # Phase 1: Validate arguments for IP/endpoint queries
    if ip or show_endpoints:
        if refresh:
            raise click.UsageError(
                'Using --ip or --endpoint(s) with --refresh is not '
                'supported for now. To fix, refresh first, '
                'then query the IP or endpoint.')

        if ip and show_endpoints:
            with ux_utils.print_exception_no_traceback():
                raise ValueError('Cannot specify both --ip and --endpoint(s) '
                                 'at the same time.')

        if endpoint is not None and endpoints:
            with ux_utils.print_exception_no_traceback():
                raise ValueError(
                    'Cannot specify both --endpoint and --endpoints '
                    'at the same time.')

        if len(clusters) != 1:
            with ux_utils.print_exception_no_traceback():
                plural = 's' if len(clusters) > 1 else ''
                cluster_num = (str(len(clusters)) if clusters else 'No')
                cause = 'a single' if len(clusters) > 1 else 'an existing'
                raise ValueError(
                    _STATUS_PROPERTY_CLUSTER_NUM_ERROR_MESSAGE.format(
                        cluster_num=cluster_num,
                        plural=plural,
                        verb='specified',
                        cause=cause,
                        property='IP address' if ip else 'endpoint(s)',
                        flag='ip' if ip else
                        ('endpoint port'
                         if show_single_endpoint else 'endpoints')))

    # Phase 2: Parallel submission of all API requests
    def submit_managed_jobs():
        fields = _DEFAULT_MANAGED_JOB_FIELDS_TO_GET
        if all_users:
            fields = fields + _USER_NAME_FIELD
        return cli_utils.get_managed_job_queue(
            refresh=False,
            skip_finished=True,
            all_users=all_users,
            fields=fields,
            limit=_NUM_MANAGED_JOBS_TO_SHOW_IN_STATUS,
        )

    def submit_services(
    ) -> Optional[server_common.RequestId[List[Dict[str, Any]]]]:
        return serve_lib.status(service_names=None)

    def submit_pools(
    ) -> Optional[server_common.RequestId[List[Dict[str, Any]]]]:
        try:
            return managed_jobs.pool_status(pool_names=None)
        except exceptions.APINotSupportedError as e:
            logger.debug(f'Pools are not supported in the remote server: {e}')
            return None

    def submit_workspace() -> Optional[server_common.RequestId[Dict[str, Any]]]:
        return sdk.workspaces()

    active_workspace = skypilot_config.get_active_workspace()

    def submit_enabled_clouds():
        return sdk.enabled_clouds(workspace=active_workspace, expand=True)

    managed_jobs_queue_request_id = None
    queue_result_version = cli_utils.QueueResultVersion.V1
    service_status_request_id = None
    workspace_request_id = None
    pool_status_request_id = None

    # Submit all requests in parallel. Up to 5 independent submissions may be
    # made below, so size the pool to match; a single worker would serialize
    # them and defeat the purpose of this phase.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        if show_managed_jobs:
            managed_jobs_request_future = executor.submit(submit_managed_jobs)
        if show_services:
            services_request_future = executor.submit(submit_services)
        if show_pools:
            pools_request_future = executor.submit(submit_pools)
        if not (ip or show_endpoints):
            workspace_request_future = executor.submit(submit_workspace)
        enabled_clouds_request_future = executor.submit(submit_enabled_clouds)

        # Get the request IDs
        if show_managed_jobs:
            (managed_jobs_queue_request_id,
             queue_result_version) = managed_jobs_request_future.result()
        if show_services:
            service_status_request_id = services_request_future.result()
        if show_pools:
            pool_status_request_id = pools_request_future.result()
        if not (ip or show_endpoints):
            workspace_request_id = workspace_request_future.result()
        enabled_clouds_request_id = enabled_clouds_request_future.result()

    # Replace unset request IDs with empty placeholders so the downstream
    # helpers can treat them uniformly.
    managed_jobs_queue_request_id = (server_common.RequestId()
                                     if not managed_jobs_queue_request_id else
                                     managed_jobs_queue_request_id)
    service_status_request_id = (server_common.RequestId()
                                 if not service_status_request_id else
                                 service_status_request_id)
    pool_status_request_id = (server_common.RequestId()
                              if not pool_status_request_id else
                              pool_status_request_id)

    # Phase 3: Get cluster records and handle special cases
    cluster_records = _get_cluster_records_and_set_ssh_config(
        query_clusters, refresh_mode, all_users, verbose)

    # TODO(zhwu): setup the ssh config for status
    if ip or show_endpoints:
        _show_endpoint(query_clusters, cluster_records, ip, endpoints, endpoint)
        return
    hints = []
    normal_clusters = []
    controllers = []
    for cluster_record in cluster_records:
        cluster_name = cluster_record['name']
        controller = controller_utils.Controllers.from_name(
            cluster_name, expect_exact_match=False)
        if controller is not None:
            controllers.append(cluster_record)
        else:
            normal_clusters.append(cluster_record)

    if workspace_request_id is not None:
        all_workspaces = sdk.get(workspace_request_id)
    else:
        all_workspaces = {constants.SKYPILOT_DEFAULT_WORKSPACE: {}}
    show_workspace = len(all_workspaces) > 1
    _show_enabled_infra(active_workspace, show_workspace,
                        enabled_clouds_request_id)
    click.echo(f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}Clusters'
               f'{colorama.Style.RESET_ALL}')

    num_pending_autostop = 0
    num_pending_autostop += status_utils.show_status_table(
        normal_clusters + controllers, verbose, all_users, query_clusters,
        show_workspace)

    managed_jobs_query_interrupted = False
    if show_managed_jobs:
        click.echo(f'\n{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                   f'Managed jobs{colorama.Style.RESET_ALL}')
        with rich_utils.client_status('[cyan]Checking managed jobs[/]'):
            try:
                num_in_progress_jobs, msg = _handle_jobs_queue_request(
                    managed_jobs_queue_request_id,
                    pool_status_request_id=pool_status_request_id,
                    show_all=False,
                    show_user=all_users,
                    max_num_jobs_to_show=_NUM_MANAGED_JOBS_TO_SHOW_IN_STATUS,
                    is_called_by_user=False,
                    only_in_progress=True,
                    queue_result_version=queue_result_version,
                )
            except KeyboardInterrupt:
                sdk.api_cancel(managed_jobs_queue_request_id, silent=True)
                managed_jobs_query_interrupted = True
                # Set to -1, so that the controller is not considered
                # down, and the hint for showing sky jobs queue
                # will still be shown.
                num_in_progress_jobs = -1
                msg = 'KeyboardInterrupt'

        click.echo(msg)
        if num_in_progress_jobs is not None:
            # jobs controller is UP.
            job_info = ''
            if num_in_progress_jobs > 0:
                plural_and_verb = ' is'
                if num_in_progress_jobs > 1:
                    plural_and_verb = 's are'
                job_info = (
                    f'{num_in_progress_jobs} managed job{plural_and_verb} '
                    'in progress')
                if num_in_progress_jobs > _NUM_MANAGED_JOBS_TO_SHOW_IN_STATUS:
                    job_info += (
                        f' ({_NUM_MANAGED_JOBS_TO_SHOW_IN_STATUS} latest '
                        'ones shown)')
                job_info += '. '
            hints.append(
                controller_utils.Controllers.JOBS_CONTROLLER.value.
                in_progress_hint(False).format(job_info=job_info))

    if show_pools and pool_status_request_id:
        num_pools = None
        if managed_jobs_query_interrupted:
            # The user has interrupted the managed-jobs query; skip the pools
            # query as well instead of blocking again.
            msg = 'KeyboardInterrupt'
        else:
            with rich_utils.client_status('[cyan]Checking pools[/]'):
                try:
                    num_pools, msg = _handle_services_request(
                        pool_status_request_id,
                        service_names=None,
                        show_all=False,
                        show_endpoint=False,
                        pool=True,
                        is_called_by_user=False)
                except KeyboardInterrupt:
                    sdk.api_cancel(pool_status_request_id, silent=True)
                    num_pools = -1
                    msg = 'KeyboardInterrupt'
        if num_pools is not None:
            if num_pools > 0:
                click.echo(f'\n{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                           f'Pools{colorama.Style.RESET_ALL}')
                click.echo(msg)
                hints.append(
                    controller_utils.Controllers.SKY_SERVE_CONTROLLER.value.
                    in_progress_hint(True))

    if show_services:
        click.echo(f'\n{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                   f'Services{colorama.Style.RESET_ALL}')
        num_services = None
        if managed_jobs_query_interrupted:
            # The user is likely to Ctrl+C the services query too; skip it.
            msg = 'KeyboardInterrupt'
        else:
            with rich_utils.client_status('[cyan]Checking services[/]'):
                try:
                    num_services, msg = _handle_services_request(
                        service_status_request_id,
                        service_names=None,
                        show_all=False,
                        show_endpoint=False,
                        is_called_by_user=False)
                except KeyboardInterrupt:
                    sdk.api_cancel(service_status_request_id, silent=True)
                    num_services = -1
                    msg = 'KeyboardInterrupt'
        click.echo(msg)
        if num_services is not None:
            hints.append(
                controller_utils.Controllers.SKY_SERVE_CONTROLLER.value.
                in_progress_hint(False))

    if num_pending_autostop > 0 and not refresh:
        # Don't print this hint if there's no pending autostop or user has
        # already passed --refresh.
        plural_and_verb = ' has'
        if num_pending_autostop > 1:
            plural_and_verb = 's have'
        hints.append(f'* {num_pending_autostop} cluster{plural_and_verb} '
                     'auto{stop,down} scheduled. Refresh statuses with: '
                     f'{colorama.Style.BRIGHT}sky status --refresh'
                     f'{colorama.Style.RESET_ALL}')
    if hints:
        click.echo('\n' + '\n'.join(hints))

@cli.command(hidden=True)
@flags.config_option(expose_value=False)
@flags.verbose_option()
def status_kubernetes(verbose: bool):
    """[Experimental] Show all SkyPilot resources in the current Kubernetes
    context, including those from other users."""
    # A single server request returns all four pieces of state at once.
    all_clusters, unmanaged_clusters, all_jobs, context = (sdk.stream_and_get(
        sdk.status_kubernetes()))
    click.echo(f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
               f'Kubernetes cluster state (context: {context})'
               f'{colorama.Style.RESET_ALL}')
    status_utils.show_kubernetes_cluster_status_table(unmanaged_clusters,
                                                      show_all=verbose)
    if all_jobs:
        click.echo(f'\n{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                   f'Managed jobs'
                   f'{colorama.Style.RESET_ALL}')
        msg = table_utils.format_job_table(all_jobs,
                                           show_all=verbose,
                                           show_user=False)
        click.echo(msg)
    if any('sky-serve-controller' in c.cluster_name for c in all_clusters):
        # TODO: Parse serve controllers and show services separately.
        #  Currently we show a hint that services are shown as clusters.
        click.echo(f'\n{colorama.Style.DIM}Hint: SkyServe replica pods are '
                   'shown in the "SkyPilot clusters" section.'
                   f'{colorama.Style.RESET_ALL}')


@cli.command()
@flags.config_option(expose_value=False)
@flags.all_option('Show all cluster information.')
@click.option('--days',
              default=30,
              type=int,
              help='Show clusters from the last N days. Default is 30 days. '
              'If set to 0, show all clusters.')
@usage_lib.entrypoint
def cost_report(all: bool, days: int):  # pylint: disable=redefined-builtin
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Show estimated costs for launched clusters.

    For each cluster, this shows: cluster name, resources, launched time,
    duration that cluster was up, and total estimated cost.

    The estimated cost column indicates the price for the cluster based on the
    type of resources being used and the duration of use up until now. This
    means if the cluster is UP, successive calls to cost-report will show
    increasing price.

    This CLI is experimental. The estimated cost is calculated based on the
    local cache of the cluster status, and may not be accurate for:

    - Clusters with autostop/use_spot set; or

    - Clusters that were terminated/stopped on the cloud console.
    """
    # days == 0 means an unbounded query window.
    days_to_query: Optional[int] = None if days == 0 else days
    cluster_records = sdk.get(sdk.cost_report(days=days_to_query))

    normal_cluster_records = []
    # Maps controller name -> its most recent cost record.
    controllers: Dict[str, Any] = {}
    for record in cluster_records:
        name = record['name']
        try:
            controller = controller_utils.Controllers.from_name(
                name, expect_exact_match=False)
        except AssertionError:
            # There could be some old controller clusters from previous
            # versions that we should not show in the cost report.
            logger.debug(f'Cluster {name} is not a controller cluster.')
            continue
        if controller is None:
            normal_cluster_records.append(record)
            continue
        # Display the most recent entry for each controller cluster.
        # TODO(sgurram): fix assumption of sorted order of clusters
        controllers.setdefault(controller.value.name, record)

    total_cost = status_utils.get_total_cost_of_displayed_records(
        normal_cluster_records, all)

    status_utils.show_cost_report_table(normal_cluster_records,
                                        all,
                                        days=days_to_query)
    for controller_name, record in controllers.items():
        status_utils.show_cost_report_table([record],
                                            all,
                                            controller_name=controller_name,
                                            days=days_to_query)
        total_cost += record['total_cost']

    click.echo(f'\n{colorama.Style.BRIGHT}'
               f'Total Cost: ${total_cost:.2f}{colorama.Style.RESET_ALL}')

    if not all:
        click.secho(
            f'Showing up to {status_utils.NUM_COST_REPORT_LINES} '
            'most recent clusters. '
            'To see all clusters in history, '
            'pass the --all flag.',
            fg='yellow')

    click.secho(
        'This feature is experimental. '
        'Costs for clusters with auto{stop,down} '
        'scheduled may not be accurate.',
        fg='yellow')


@cli.command()
@flags.config_option(expose_value=False)
@flags.all_users_option('Show all users\' information in full.')
@click.option('--skip-finished',
              '-s',
              default=False,
              is_flag=True,
              required=False,
              help='Show only pending/running jobs\' information.')
@click.argument('clusters',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_cluster_name))
@usage_lib.entrypoint
def queue(clusters: List[str], skip_finished: bool, all_users: bool):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Show the job queue for cluster(s)."""
    click.secho('Fetching and parsing job queue...', fg='cyan')
    if not clusters:
        # No clusters given: query every cluster visible to this user scope.
        records = _get_cluster_records_and_set_ssh_config(
            None, all_users=all_users)
        clusters = [record['name'] for record in records]

    logger.info(f'Fetching job queue for: {", ".join(clusters)}')
    job_tables: Dict[str, str] = {}
    unsupported_clusters: List[str] = []

    def _fetch_one(cluster):
        """Fetch and format one cluster's queue; records failures inline."""
        try:
            job_table = sdk.stream_and_get(
                sdk.queue(cluster, skip_finished, all_users))
        except (RuntimeError, exceptions.CommandError, ValueError,
                exceptions.NotSupportedError, exceptions.ClusterNotUpError,
                exceptions.CloudUserIdentityError,
                exceptions.ClusterOwnerIdentityMismatchError) as e:
            if isinstance(e, exceptions.NotSupportedError):
                unsupported_clusters.append(cluster)
            click.echo(f'{colorama.Fore.YELLOW}Failed to get the job queue for '
                       f'cluster {cluster!r}.{colorama.Style.RESET_ALL}\n'
                       f'  {common_utils.format_exception(e)}')
        else:
            job_tables[cluster] = table_utils.format_job_queue(job_table)

    subprocess_utils.run_in_parallel(_fetch_one, clusters)

    user_str = 'all users' if all_users else 'current user'
    for cluster, job_table in job_tables.items():
        click.echo(f'\nJob queue of {user_str} on cluster {cluster}\n'
                   f'{job_table}')

    if unsupported_clusters:
        click.secho(
            f'Note: Job queues are not supported on clusters: '
            f'{", ".join(unsupported_clusters)}',
            fg='yellow')


@cli.command()
@flags.config_option(expose_value=False)
@click.option('--provision',
              is_flag=True,
              default=False,
              help='Stream the cluster provisioning logs (provision.log).')
@click.option('--worker',
              '-w',
              default=None,
              type=int,
              help='The worker ID to stream the logs from. '
              'If not set, stream the logs of the head node.')
@click.option(
    '--sync-down',
    '-s',
    is_flag=True,
    default=False,
    help='Sync down the logs of a job to the local machine. For a distributed'
    ' job, a separate log file from each worker will be downloaded.')
@click.option(
    '--status',
    is_flag=True,
    default=False,
    help=('If specified, do not show logs but exit with a status code for the '
          'job\'s status: 0 for succeeded, or 1 for all other statuses.'))
@click.option(
    '--follow/--no-follow',
    is_flag=True,
    default=True,
    help=('Follow the logs of a job. '
          'If --no-follow is specified, print the log so far and exit. '
          '[default: --follow]'))
@click.option(
    '--tail',
    default=0,
    type=int,
    help=('The number of lines to display from the end of the log file. '
          'Default is 0, which means print all lines.'))
@click.argument('cluster',
                required=True,
                type=str,
                **_get_shell_complete_args(_complete_cluster_name))
@click.argument('job_ids', type=str, nargs=-1)
# TODO(zhwu): support logs by job name
@usage_lib.entrypoint
def logs(
    cluster: str,
    job_ids: Tuple[str, ...],
    provision: bool,
    worker: Optional[int],
    sync_down: bool,
    status: bool,  # pylint: disable=redefined-outer-name
    follow: bool,
    tail: int,
):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Tail the log of a job.

    If JOB_ID is not provided, the latest job on the cluster will be used.

    1. If no flags are provided, tail the logs of the job_id specified. At most
    one job_id can be provided.

    2. If ``--status`` is specified, print the status of the job and exit with
    returncode 0 if the job succeeded. At most one job_id can
    be specified. Other possible return codes:

    - 100: job failed.
    - 101: job not finished.
    - 102: job not found.
    - 103: job was cancelled by the user.

    3. If ``--sync-down`` is specified, the logs of the job will be downloaded
    from the cluster and saved to the local machine under
    ``~/sky_logs``. Multiple job_ids can be specified.

    4. If the job fails or fetching the logs fails, the command will exit with
    a non-zero return code.
    """
    # Validate mutually-exclusive flag combinations before contacting the
    # server, so users get fast, clear errors.
    if worker is not None:
        if not provision:
            raise click.UsageError(
                '--worker can only be used with --provision.')
        if worker < 1:
            raise click.UsageError('--worker must be a positive integer.')

    if provision and (sync_down or status or job_ids):
        raise click.UsageError(
            '--provision cannot be combined with job log options '
            '(--sync-down/--status/job IDs).')

    if sync_down and status:
        # NOTE: message must reference the real flag spelling (--sync-down).
        raise click.UsageError(
            'Both --sync-down and --status are specified '
            '(ambiguous). To fix: specify at most one of them.')

    if len(job_ids) > 1 and not sync_down:
        raise click.UsageError(
            f'Cannot stream logs of multiple jobs (IDs: {", ".join(job_ids)}).'
            '\nPass -s/--sync-down to download the logs instead.')

    # Normalize the empty tuple to None for the SDK calls below.
    job_ids = None if not job_ids else job_ids

    if provision:
        # Stream provision logs
        sys.exit(
            sdk.tail_provision_logs(cluster_name=cluster,
                                    worker=worker,
                                    follow=follow,
                                    tail=tail))

    if sync_down:
        with rich_utils.client_status(
                ux_utils.spinner_message('Downloading logs')):
            log_local_path_dict = sdk.download_logs(
                cluster,
                list(job_ids) if job_ids else None)
        style = colorama.Style
        fore = colorama.Fore
        for job, log_local_path in log_local_path_dict.items():
            logger.info(f'{fore.CYAN}Job {job} logs: {log_local_path}'
                        f'{style.RESET_ALL}')
        return

    assert job_ids is None or len(job_ids) <= 1, job_ids
    job_id: Optional[int] = None
    job_ids_to_query: Optional[List[int]] = None
    if job_ids:
        # Already check that len(job_ids) <= 1. This variable is used later
        # in sdk.tail_logs.
        cur_job_id = job_ids[0]
        if not cur_job_id.isdigit():
            raise click.UsageError(f'Invalid job ID {cur_job_id}. '
                                   'Job ID must be an integer.')
        job_id = int(cur_job_id)
        job_ids_to_query = [job_id]
    else:
        # job_ids is either None or empty list, so it is safe to cast it here.
        job_ids_to_query = typing.cast(Optional[List[int]], job_ids)
    if status:
        job_statuses = sdk.stream_and_get(
            sdk.job_status(cluster, job_ids_to_query))
        job_id = list(job_statuses.keys())[0]
        # If job_ids is None and no job has been submitted to the cluster,
        # it will return {None: None}.
        if job_id is None:
            click.secho(f'No job found on cluster {cluster!r}.', fg='red')
            sys.exit(exceptions.JobExitCode.NOT_FOUND)
        job_status = list(job_statuses.values())[0]
        job_status_str = job_status.value if job_status is not None else 'None'
        click.echo(f'Job {job_id}: {job_status_str}')
        if job_status == job_lib.JobStatus.SUCCEEDED:
            return
        else:
            returncode = exceptions.JobExitCode.from_job_status(job_status)
            if job_status is None:
                id_str = '' if job_id is None else f'{job_id} '
                click.secho(f'Job {id_str}not found', fg='red')
            sys.exit(returncode)

    job_str = f'job {job_id}'
    if job_id is None:
        job_str = 'the last job'
    logger.info(f'{colorama.Fore.YELLOW}'
                f'Tailing logs of {job_str} on cluster {cluster!r}...'
                f'{colorama.Style.RESET_ALL}')

    # Stream logs from the server.
    sys.exit(sdk.tail_logs(cluster, job_id, follow, tail=tail))

@cli.command()
@flags.config_option(expose_value=False)
@click.argument('cluster',
                required=True,
                type=str,
                **_get_shell_complete_args(_complete_cluster_name))
@flags.all_option('Cancel all jobs from current user on the specified cluster.')
@flags.all_users_option(
    'Cancel all jobs on the specified cluster for all users.')
@flags.yes_option()
@_add_click_options(flags.COMMON_OPTIONS)
@click.argument('jobs', required=False, type=int, nargs=-1)
@usage_lib.entrypoint
def cancel(
    cluster: str,
    all: bool,  # pylint: disable=redefined-builtin
    all_users: bool,
    jobs: List[int],  # pylint: disable=redefined-outer-name
    yes: bool,
    async_call: bool,
):  # pylint: disable=redefined-builtin
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Cancel job(s).

    Example usage:

    .. code-block:: bash

      \b
      # Cancel specific jobs on a cluster.
      sky cancel cluster_name 1
      sky cancel cluster_name 1 2 3
      \b
      # Cancel all your jobs on a cluster.
      sky cancel cluster_name -a
      \b
      # Cancel all users' jobs on a cluster.
      sky cancel cluster_name -u
      \b
      # Cancel the latest running job on a cluster.
      sky cancel cluster_name
      \b
      # Cancel the latest running job on all matching clusters.
      sky cancel cluster-glob*
      \b
      # Cancel all your jobs on all matching clusters.
      sky cancel cluster-glob* -a

    Job IDs can be looked up by ``sky queue cluster_name``.
    Cluster names support glob patterns (e.g., px* matches px1, px2).
    """
    # Expand a glob pattern (e.g., 'px*') into the list of matching clusters.
    matching_clusters = []
    if '*' in cluster or '?' in cluster or '[' in cluster:
        # This is a glob pattern, expand it.
        try:
            # Get list of all available clusters (across all users), so the
            # pattern can be matched against every known cluster name.
            all_records = sdk.get(sdk.status(cluster_names=None,
                                             all_users=True))
        except Exception as e:  # pylint: disable=broad-except
            # Surface the underlying failure: a failed status query (e.g.,
            # auth or network error) is not the same as "no clusters matched
            # the pattern", which was previously reported here.
            raise click.UsageError(
                f'Failed to query clusters for pattern {cluster!r}: '
                f'{e}') from e
        all_clusters = [record['name'] for record in all_records]
        matching_clusters = [
            c for c in all_clusters if fnmatch.fnmatch(c, cluster)
        ]
    else:
        # Literal cluster name.
        matching_clusters = [cluster]

    if not matching_clusters:
        raise click.UsageError(f'No clusters match pattern: {cluster!r}')

    # Don't allow job IDs when using glob patterns that match multiple
    # clusters: job IDs are only meaningful within a single cluster.
    if len(matching_clusters) > 1 and jobs:
        raise click.UsageError(
            'Cannot specify job IDs when cluster pattern '
            'matches multiple clusters. '
            f'Pattern {cluster!r} matches: {", ".join(matching_clusters)}')

    # Build a human-readable description of what will be cancelled; it is
    # shown in the confirmation prompt below.
    job_identity_str = ''
    job_ids_to_cancel = None
    if not jobs and not all and not all_users:
        click.echo(f'{colorama.Fore.YELLOW}No job IDs or'
                   ' --all/--all-users provided; '
                   'cancelling the latest running job.'
                   f'{colorama.Style.RESET_ALL}')
        job_identity_str = 'the latest running job'
    elif all_users:
        job_identity_str = 'all users\' jobs'
    else:
        if all:
            job_identity_str = 'all your jobs'
        if jobs:
            jobs_str = ' '.join(map(str, jobs))
            plural = 's' if len(jobs) > 1 else ''
            connector = ' and ' if job_identity_str else ''
            job_identity_str += f'{connector}job{plural} {jobs_str}'
            job_ids_to_cancel = jobs

    job_identity_str += f' on cluster(s): {", ".join(matching_clusters)}'

    if not yes:
        click.confirm(f'Cancelling {job_identity_str}. Proceed?',
                      default=True,
                      abort=True,
                      show_default=True)

    for cluster in matching_clusters:
        try:
            request_id = sdk.cancel(cluster,
                                    all=all,
                                    all_users=all_users,
                                    job_ids=job_ids_to_cancel)
            _async_call_or_wait(request_id, async_call, 'sky.cancel')
        except exceptions.NotSupportedError as e:
            # Cancelling jobs on a controller cluster is declined with a
            # controller-specific hint.
            controller = controller_utils.Controllers.from_name(
                cluster, expect_exact_match=False)
            assert controller is not None, cluster
            with ux_utils.print_exception_no_traceback():
                raise click.UsageError(
                    controller.value.decline_cancel_hint) from e
        except ValueError as e:
            # Chain the cause for consistency with the branch above.
            raise click.UsageError(str(e)) from e
        except exceptions.ClusterNotUpError:
            with ux_utils.print_exception_no_traceback():
                raise


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('clusters',
                nargs=-1,
                required=False,
                **_get_shell_complete_args(_complete_cluster_name))
@flags.all_option('Stop all existing clusters.')
@flags.all_users_option('Stop all existing clusters for all users.')
@flags.yes_option()
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def stop(
    clusters: List[str],
    all: bool,  # pylint: disable=redefined-builtin
    all_users: bool,
    yes: bool,
    async_call: bool,
):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Stop cluster(s).

    CLUSTER is the name (or glob pattern) of the cluster to stop.  If both
    CLUSTER and ``--all`` are supplied, the latter takes precedence.

    Data on attached disks is not lost when a cluster is stopped.  Billing for
    the instances will stop, while the disks will still be charged.  Those
    disks will be reattached when restarting the cluster.

    Currently, spot instance clusters cannot be stopped.

    Examples:

    .. code-block:: bash

      # Stop a specific cluster.
      sky stop cluster_name
      \b
      # Stop multiple clusters.
      sky stop cluster1 cluster2
      \b
      # Stop all clusters matching glob pattern 'cluster*'.
      sky stop "cluster*"
      \b
      # Stop all existing clusters.
      sky stop -a

    """
    # Thin wrapper: delegate to the shared down/stop implementation.
    # down=False selects "stop" (instances are stopped, not torn down).
    _down_or_stop_clusters(clusters,
                           apply_to_all=all,
                           all_users=all_users,
                           down=False,
                           no_confirm=yes,
                           async_call=async_call)


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('clusters',
                nargs=-1,
                required=False,
                **_get_shell_complete_args(_complete_cluster_name))
@flags.all_option('Autostop all existing clusters.')
@flags.all_users_option('Autostop all existing clusters for all users.')
@click.option('--idle-minutes',
              '-i',
              type=int,
              default=None,
              required=False,
              help=('Set the idle minutes before autostopping the cluster. '
                    'See the doc above for detailed semantics.'))
@flags.wait_for_option('idle-minutes')
@click.option(
    '--cancel',
    default=False,
    is_flag=True,
    required=False,
    help='Cancel any currently active auto{stop,down} setting for the '
    'cluster. No-op if there is no active setting.')
@click.option(
    '--down',
    default=False,
    is_flag=True,
    required=False,
    help='Use autodown (tear down the cluster; non-restartable), instead '
    'of autostop (restartable).')
@flags.yes_option()
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def autostop(
    clusters: List[str],
    all: bool,  # pylint: disable=redefined-builtin
    all_users: bool,
    idle_minutes: Optional[int],
    wait_for: Optional[str],
    cancel: bool,  # pylint: disable=redefined-outer-name
    down: bool,  # pylint: disable=redefined-outer-name
    yes: bool,
    async_call: bool,
):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Schedule an autostop or autodown for cluster(s).

    Autostop/autodown will automatically stop or teardown a cluster when it
    becomes idle for a specified duration.

    CLUSTERS are the names (or glob patterns) of the clusters to stop. If both
    CLUSTERS and ``--all`` are supplied, the latter takes precedence.

    Idleness time of a cluster is reset to zero, when any of these happens:

    - A job is submitted (``sky launch`` or ``sky exec``).

    - The cluster has restarted.

    - An autostop idle time is set.

    - An SSH session is active (To disable this, set ``--wait-for jobs``).

    To disable the idleness timer completely and set a hard time limit, set
    ``--wait-for none``.

    Example 1: say a cluster with autostop set to 2 hours has been idle for 1
    hour, then autostop is reset to 30 minutes. The cluster will not be
    immediately autostopped. Instead, the idleness timer restarts counting
    when the second autostop setting of 30 minutes was submitted.

    Example 2: say a cluster without any autostop set has been idle for 1 hour,
    then an autostop of 30 minutes is set. The cluster will not be immediately
    autostopped. Instead, the idleness timer only starts counting after the
    autostop setting was set.

    Typical usage:

    .. code-block:: bash

        # Autostop this cluster after 60 minutes of idleness.
        sky autostop cluster_name -i 60
        \b
        # Cancel autostop for a specific cluster.
        sky autostop cluster_name --cancel
        \b
        # Autostop this cluster after 60 minutes, regardless of activity.
        sky autostop cluster_name -i 60 --wait-for none
        \b
        # Autodown this cluster after 60 minutes of idleness.
        sky autostop cluster_name -i 60 --down
    """
    # --cancel and --idle-minutes are mutually exclusive.
    if cancel:
        if idle_minutes is not None:
            raise click.UsageError(
                'Only one of --idle-minutes and --cancel should be specified. '
                f'cancel: {cancel}, idle_minutes: {idle_minutes}')
        # A negative idle time signals cancellation of the active setting.
        idle_minutes = -1
    elif idle_minutes is None:
        # Default idle time when neither --cancel nor --idle-minutes is given.
        idle_minutes = 5
    # Convert the textual --wait-for value into its enum form, if provided.
    wait_for_condition = (autostop_lib.AutostopWaitFor.from_str(wait_for)
                          if wait_for is not None else None)
    _down_or_stop_clusters(clusters,
                           apply_to_all=all,
                           all_users=all_users,
                           down=down,
                           no_confirm=yes,
                           idle_minutes_to_autostop=idle_minutes,
                           wait_for=wait_for_condition,
                           async_call=async_call)


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('clusters',
                nargs=-1,
                required=False,
                **_get_shell_complete_args(_complete_cluster_name))
@flags.all_option('Start all existing clusters.')
@flags.yes_option()
@click.option(
    '--idle-minutes-to-autostop',
    '-i',
    default=None,
    type=int,
    required=False,
    help=('Automatically stop the cluster after this many minutes '
          'of idleness, i.e., no running or pending jobs in the cluster\'s job '
          'queue. Idleness gets reset depending on the ``--wait-for`` flag. '
          'Setting this flag is equivalent to '
          'running ``sky launch -d ...`` and then ``sky autostop -i <minutes>``'
          '. If not set, the cluster will not be autostopped.'))
@flags.wait_for_option('idle-minutes-to-autostop')
@click.option(
    '--down',
    default=False,
    is_flag=True,
    required=False,
    help=
    ('Autodown the cluster: tear down the cluster after specified minutes of '
     'idle time after all jobs finish (successfully or abnormally). Requires '
     '--idle-minutes-to-autostop to be set.'),
)
@click.option(
    '--retry-until-up',
    '-r',
    default=False,
    is_flag=True,
    required=False,
    # Disabling quote check here, as there seems to be a bug in pylint,
    # which incorrectly recognizes the help string as a docstring.
    # pylint: disable=bad-docstring-quotes
    help=('Retry provisioning infinitely until the cluster is up, '
          'if we fail to start the cluster due to unavailability errors.'),
)
@click.option(
    '--force',
    '-f',
    default=False,
    is_flag=True,
    required=False,
    help=('Force start the cluster even if it is already UP. Useful for '
          'upgrading the SkyPilot runtime on the cluster.'))
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def start(
    clusters: List[str],
    all: bool,
    yes: bool,
    idle_minutes_to_autostop: Optional[int],
    wait_for: Optional[str],
    down: bool,  # pylint: disable=redefined-outer-name
    retry_until_up: bool,
    force: bool,
    async_call: bool,
):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Restart cluster(s).

    If a cluster is previously stopped (status is STOPPED) or failed in
    provisioning/runtime installation (status is INIT), this command will
    attempt to start the cluster.  In the latter case, provisioning and runtime
    installation will be retried.

    Auto-failover provisioning is not used when restarting a stopped
    cluster. It will be started on the same cloud, region, and zone that were
    chosen before.

    If a cluster is already in the UP status, this command has no effect.

    Examples:

    .. code-block:: bash

      # Restart a specific cluster.
      sky start cluster_name
      \b
      # Restart multiple clusters.
      sky start cluster1 cluster2
      \b
      # Restart all clusters.
      sky start -a

    """
    # --down is only meaningful together with an autostop duration.
    if down and idle_minutes_to_autostop is None:
        raise click.UsageError(
            '--idle-minutes-to-autostop must be set if --down is set.')
    # Names of the clusters that will actually be (re)started.
    to_start = []

    # Resolve which cluster records to operate on, in priority order:
    # implicit single cluster, --all, or explicit names/globs.
    cluster_records = None
    if not clusters and not all:
        # UX: frequently users may have only 1 cluster. In this case, be smart
        # and default to that unique choice.
        all_clusters = _get_cluster_records_and_set_ssh_config(
            clusters=None, refresh=common.StatusRefreshMode.AUTO)
        if len(all_clusters) <= 1:
            cluster_records = all_clusters
        else:
            raise click.UsageError(
                '`sky start` requires either a cluster name or glob '
                '(see `sky status`), or the -a/--all flag.')

    if all:
        if clusters:
            click.echo('Both --all and cluster(s) specified for sky start. '
                       'Letting --all take effect.')

        all_clusters = _get_cluster_records_and_set_ssh_config(
            clusters=None, refresh=common.StatusRefreshMode.AUTO)

        # Get all clusters that are not controllers.
        cluster_records = [
            cluster for cluster in all_clusters
            if controller_utils.Controllers.from_name(
                cluster['name'], expect_exact_match=False) is None
        ]
    if cluster_records is None:
        # Get GLOB cluster names
        cluster_records = _get_cluster_records_and_set_ssh_config(
            clusters, refresh=common.StatusRefreshMode.AUTO)

    if not cluster_records:
        click.echo('Cluster(s) not found (tip: see `sky status`). Do you '
                   'mean to use `sky launch` to provision a new cluster?')
        return
    else:
        # Filter down to clusters whose status allows a (re)start.
        for cluster in cluster_records:
            name = cluster['name']
            cluster_status = cluster['status']
            # A cluster may have one of the following states:
            #
            #  STOPPED - ok to restart
            #    (currently, only AWS/GCP non-spot clusters can be in this
            #    state)
            #
            #  UP - skipped, see below
            #
            #  INIT - ok to restart:
            #    1. It can be a failed-to-provision cluster, so it isn't up
            #      (Ex: launch --gpus=A100:8).  Running `sky start` enables
            #      retrying the provisioning - without setup steps being
            #      completed. (Arguably the original command that failed should
            #      be used instead; but using start isn't harmful - after it
            #      gets provisioned successfully the user can use the original
            #      command).
            #
            #    2. It can be an up cluster that failed one of the setup steps.
            #      This way 'sky start' can change its status to UP, enabling
            #      'sky ssh' to debug things (otherwise `sky ssh` will fail an
            #      INIT state cluster due to head_ip not being cached).
            #
            #      This can be replicated by adding `exit 1` to Task.setup.
            if (not force and cluster_status == status_lib.ClusterStatus.UP):
                # An UP cluster; skipping 'sky start' because:
                #  1. For a really up cluster, this has no effects (ray up -y
                #    --no-restart) anyway.
                #  2. A cluster may show as UP but is manually stopped in the
                #    UI.  If Azure/GCP: ray autoscaler doesn't support reusing,
                #    so 'sky start existing' will actually launch a new
                #    cluster with this name, leaving the original cluster
                #    zombied (remains as stopped in the cloud's UI).
                #
                #    This is dangerous and unwanted behavior!
                click.echo(f'Cluster {name} already has status UP.')
                continue

            assert force or cluster_status in (
                status_lib.ClusterStatus.INIT,
                status_lib.ClusterStatus.STOPPED), cluster_status
            to_start.append(name)
    if not to_start:
        return

    # Checks for controller clusters (jobs controller / sky serve controller).
    controllers, normal_clusters = [], []
    for name in to_start:
        if controller_utils.Controllers.from_name(
                name, expect_exact_match=False) is not None:
            controllers.append(name)
        else:
            normal_clusters.append(name)
    if controllers and normal_clusters:
        # Keep this behavior the same as _down_or_stop_clusters().
        raise click.UsageError('Starting controllers with other cluster(s) '
                               'is currently not supported.\n'
                               'Please start the former independently.')
    if controllers:
        bold = ux_utils.BOLD
        reset_bold = ux_utils.RESET_BOLD
        if len(controllers) != 1:
            raise click.UsageError(
                'Starting multiple controllers is currently not supported.\n'
                'Please start them independently.')
        if idle_minutes_to_autostop is not None:
            raise click.UsageError(
                'Autostop options are currently not allowed when starting the '
                'controllers. Use the default autostop settings by directly '
                f'calling: {bold}sky start {" ".join(controllers)}{reset_bold}')

    if not yes:
        cluster_str = 'clusters' if len(to_start) > 1 else 'cluster'
        cluster_list = ', '.join(to_start)
        click.confirm(
            f'Restarting {len(to_start)} {cluster_str}: '
            f'{cluster_list}. Proceed?',
            default=True,
            abort=True,
            show_default=True)

    # Fire off one start request per cluster in parallel.
    request_ids = subprocess_utils.run_in_parallel(
        lambda name: sdk.start(name,
                               idle_minutes_to_autostop,
                               autostop_lib.AutostopWaitFor.from_str(wait_for)
                               if wait_for is not None else None,
                               retry_until_up,
                               down=down,
                               force=force), to_start)

    # Wait for (or detach from) each request, reporting per-cluster outcome.
    for name, request_id in zip(to_start, request_ids):
        try:
            _async_call_or_wait(request_id, async_call, 'sky.start')
            if not async_call:
                # Add ssh config for the cluster
                _get_cluster_records_and_set_ssh_config(clusters=[name])
        except (exceptions.NotSupportedError,
                exceptions.ClusterOwnerIdentityMismatchError) as e:
            click.echo(str(e))
        else:
            if not async_call:
                click.secho(f'Cluster {name} started.', fg='green')


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('clusters',
                nargs=-1,
                required=False,
                **_get_shell_complete_args(_complete_cluster_name))
@flags.all_option('Tear down all existing clusters.')
@flags.all_users_option('Tear down all existing clusters for all users.')
@flags.yes_option()
@click.option(
    '--purge',
    '-p',
    is_flag=True,
    default=False,
    required=False,
    help=('(Advanced) Forcefully remove the cluster(s) from '
          'SkyPilot\'s cluster table, even if the actual cluster termination '
          'failed on the cloud. WARNING: This flag should only be set sparingly'
          ' in certain manual troubleshooting scenarios; with it set, it is the'
          ' user\'s responsibility to ensure there are no leaked instances and '
          'related resources.'))
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def down(
    clusters: List[str],
    all: bool,  # pylint: disable=redefined-builtin
    all_users: bool,
    yes: bool,
    purge: bool,
    async_call: bool,
):
    # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
    """Tear down cluster(s).

    CLUSTER is the name of the cluster (or glob pattern) to tear down.  If both
    CLUSTER and ``--all`` are supplied, the latter takes precedence.

    Tearing down a cluster will delete all associated resources (all billing
    stops), and any data on the attached disks will be lost.  Accelerators
    (e.g., TPUs) that are part of the cluster will be deleted too.


    Examples:

    .. code-block:: bash

      # Tear down a specific cluster.
      sky down cluster_name
      \b
      # Tear down multiple clusters.
      sky down cluster1 cluster2
      \b
      # Tear down all clusters matching glob pattern 'cluster*'.
      sky down "cluster*"
      \b
      # Tear down all existing clusters.
      sky down -a

    """
    # Thin wrapper: delegate to the shared down/stop implementation.
    # down=True selects full teardown (instances and disks are deleted).
    _down_or_stop_clusters(clusters,
                           apply_to_all=all,
                           all_users=all_users,
                           down=True,
                           no_confirm=yes,
                           purge=purge,
                           async_call=async_call)


def _hint_or_raise_for_down_jobs_controller(controller_name: str,
                                            purge: bool) -> None:
    """Helper function to check job controller status before tearing it down.

    Raises helpful exceptions and errors if the controller is not in a safe
    state to be torn down.

    Args:
        controller_name: Name of the jobs controller cluster.
        purge: If True, warn about (rather than refuse) in-progress managed
            jobs or pools, allowing the teardown to proceed.

    Raises:
        RuntimeError: if failed to get the job queue.
        exceptions.NotSupportedError: if the controller is not in a safe state
            to be torn down (e.g., because it has jobs running or
            it is in init state)
    """
    controller = controller_utils.Controllers.from_name(
        controller_name, expect_exact_match=False)
    assert controller is not None, controller_name

    status_counts: Optional[Dict[str, int]] = None
    managed_jobs_: List[responses.ManagedJobRecord] = []
    with rich_utils.client_status(
            '[bold cyan]Checking for in-progress managed jobs and pools[/]'):
        try:
            fields = _DEFAULT_MANAGED_JOB_FIELDS_TO_GET + _USER_NAME_FIELD
            request_id, queue_result_version = cli_utils.get_managed_job_queue(
                refresh=False,
                skip_finished=True,
                all_users=True,
                fields=fields,
            )
            result = sdk.stream_and_get(request_id)
            # v2 queue results additionally bundle status counts; older
            # versions return just the job records.
            if queue_result_version.v2():
                managed_jobs_, _, status_counts, _ = result
            else:
                managed_jobs_ = typing.cast(List[responses.ManagedJobRecord],
                                            result)
            request_id_pools = managed_jobs.pool_status(pool_names=None)
            pools_ = sdk.stream_and_get(request_id_pools)
        except exceptions.ClusterNotUpError as e:
            if controller.value.connection_error_hint in str(e):
                with ux_utils.print_exception_no_traceback():
                    raise exceptions.NotSupportedError(
                        controller.value.
                        decline_down_when_failed_to_fetch_status_hint)
            if e.cluster_status is None:
                click.echo(
                    'Managed jobs controller has already been torn down.')
                sys.exit(0)
            # At this point, the managed jobs are failed to be fetched due to
            # the controller being STOPPED or being firstly launched, i.e.,
            # there is no in-progress managed jobs.
            managed_jobs_ = []
            pools_ = []

    msg = (f'{colorama.Fore.YELLOW}WARNING: Tearing down the managed '
           'jobs controller. Please be aware of the following:'
           f'{colorama.Style.RESET_ALL}'
           '\n * All logs and status information of the managed '
           'jobs (output of `sky jobs queue`) will be lost.')
    click.echo(msg)
    if managed_jobs_:
        # In-progress jobs found: show them, then refuse (or, with --purge,
        # merely warn).
        job_table = table_utils.format_job_table(
            managed_jobs_,
            show_all=False,
            show_user=True,
            status_counts=status_counts,
        )
        msg = controller.value.decline_down_for_dirty_controller_hint
        # Add prefix to each line to align with the bullet point.
        msg += '\n'.join(
            ['   ' + line for line in job_table.split('\n') if line != ''])
        if purge:
            logger.warning('--purge is set, ignoring the in-progress managed '
                           'jobs. This could cause leaked clusters!')
        else:
            with ux_utils.print_exception_no_traceback():
                raise exceptions.NotSupportedError(msg)
    elif pools_:
        # Active pools found: same policy as jobs above.
        pool_names = ', '.join([pool['name'] for pool in pools_])
        if purge:
            logger.warning('--purge is set, ignoring the in-progress pools. '
                           'This could cause leaked clusters!')
        else:
            msg = (f'{colorama.Fore.YELLOW}WARNING: Tearing down the managed '
                   'jobs controller is not supported, as it is currently '
                   f'hosting the following pools: {pool_names}. Please '
                   'terminate the pools first with '
                   f'{colorama.Style.BRIGHT}sky jobs pool down -a'
                   f'{colorama.Style.RESET_ALL}.')
            with ux_utils.print_exception_no_traceback():
                raise exceptions.NotSupportedError(msg)
    else:
        click.echo(' * No in-progress managed jobs or running pools found. It '
                   'should be safe to terminate (see caveats above).')


def _hint_or_raise_for_down_sky_serve_controller(controller_name: str,
                                                 purge: bool) -> None:
    """Helper function to check serve controller status before tearing it down.

    Raises helpful exceptions and errors if the controller is not in a safe
    state to be torn down.

    Args:
        controller_name: Name of the sky serve controller cluster.
        purge: If True, warn about (rather than refuse) live services,
            allowing the teardown to proceed.

    Raises:
        RuntimeError: if failed to get the service status.
        exceptions.NotSupportedError: if the controller is not in a safe state
            to be torn down (e.g., because it has services running or
            it is in init state)
    """
    controller = controller_utils.Controllers.from_name(
        controller_name, expect_exact_match=False)
    assert controller is not None, controller_name
    with rich_utils.client_status('[bold cyan]Checking for live services[/]'):
        try:
            request_id = serve_lib.status(service_names=None)
            services = sdk.stream_and_get(request_id)
        except exceptions.ClusterNotUpError as e:
            if controller.value.connection_error_hint in str(e):
                with ux_utils.print_exception_no_traceback():
                    raise exceptions.NotSupportedError(
                        controller.value.
                        decline_down_when_failed_to_fetch_status_hint)
            if e.cluster_status is None:
                click.echo('Serve controller has already been torn down.')
                sys.exit(0)
            # At this point, the services are failed to be fetched due to the
            # controller being STOPPED or being firstly launched, i.e., there is
            # no in-progress services.
            services = []

    if services:
        # Live services found: refuse the teardown unless --purge is set, in
        # which case only warn.
        service_names = [service['name'] for service in services]
        if purge:
            logger.warning('--purge is set, ignoring the in-progress services. '
                           'This could cause leaked clusters!')
        else:
            with ux_utils.print_exception_no_traceback():
                msg = (controller.value.decline_down_for_dirty_controller_hint.
                       format(service_names=', '.join(service_names)))
                raise exceptions.NotSupportedError(msg)
    # Do nothing for STOPPED state, as it is safe to terminate the cluster.
    click.echo(f'Terminate sky serve controller: {controller_name}.')


def _controller_to_hint_or_raise(
        controller: controller_utils.Controllers
) -> Callable[[str, bool], None]:
    """Return the pre-teardown safety-check helper for `controller`.

    The jobs controller gets the managed-jobs check; any other controller
    falls back to the sky serve check.
    """
    return (_hint_or_raise_for_down_jobs_controller
            if controller == controller_utils.Controllers.JOBS_CONTROLLER else
            _hint_or_raise_for_down_sky_serve_controller)


def _down_or_stop_clusters(
        names: List[str],
        apply_to_all: bool = False,
        all_users: bool = False,
        down: bool = False,  # pylint: disable=redefined-outer-name
        no_confirm: bool = True,
        purge: bool = False,
        idle_minutes_to_autostop: Optional[int] = None,
        wait_for: Optional[autostop_lib.AutostopWaitFor] = None,
        async_call: bool = False) -> None:
    """Tears down or (auto-)stops a cluster (or all clusters).

    Controllers (jobs controller and sky serve controller) can only be
    terminated if the cluster name is explicitly and uniquely specified (not
    via glob).

    Args:
        names: The names of the clusters to tear down or stop. If empty,
            apply_to_all or all_users must be set.
        apply_to_all: If True, apply the operation to all clusters.
        all_users: If True, apply the operation to all clusters for all users.
        down: If True, tear down the clusters.
        no_confirm: If True, skip the confirmation prompt.
        purge: If True, forcefully remove the clusters from the cluster table.
        idle_minutes_to_autostop: The number of minutes to wait before
            automatically stopping the cluster.
        wait_for: Determines the condition for resetting the idleness timer.
        async_call: If True, send the request asynchronously.
    """
    # Name of the CLI subcommand this invocation corresponds to; only used in
    # error messages below.
    if down:
        command = 'down'
    elif idle_minutes_to_autostop is not None:
        command = 'autostop'
    else:
        command = 'stop'
    if not names and not apply_to_all and not all_users:
        raise click.UsageError(
            f'`sky {command}` requires either a cluster name or glob '
            '(see `sky status`), or the -a/--all flag for all your '
            'clusters, or the -u/--all-users flag for all clusters in '
            'your team.')

    # Human-readable verb used in prompts and progress messages.
    operation = 'Terminating' if down else 'Stopping'
    if idle_minutes_to_autostop is not None:
        # A negative idle_minutes_to_autostop cancels any scheduled
        # autostop/autodown instead of scheduling one.
        is_cancel = idle_minutes_to_autostop < 0
        verb = 'Cancelling' if is_cancel else 'Scheduling'
        option_str = 'down' if down else 'stop'
        if is_cancel:
            option_str = '{stop,down}'
        operation = f'{verb} auto{option_str} on'

    names = list(names)
    if names:
        # Split the user-provided names into controller names and normal
        # cluster names; controllers get special validation below.
        controllers = [
            name for name in names if controller_utils.Controllers.from_name(
                name, expect_exact_match=False) is not None
        ]
        controllers_str = ', '.join(map(repr, controllers))
        # Expand globs against the cluster table and drop controllers from
        # the normal-cluster list.
        names = [
            cluster['name']
            for cluster in _get_cluster_records_and_set_ssh_config(names)
            if controller_utils.Controllers.from_name(
                cluster['name'], expect_exact_match=False) is None
        ]

        # Make sure the controllers are explicitly specified without other
        # normal clusters.
        if controllers:
            if names:
                names_str = ', '.join(map(repr, names))
                raise click.UsageError(
                    f'{operation} controller(s) '
                    f'{controllers_str} with other cluster(s) '
                    f'{names_str} is currently not supported.\n'
                    f'Please omit the controller(s) {controllers}.')
            if len(controllers) > 1:
                raise click.UsageError(
                    f'{operation} multiple controllers '
                    f'{controllers_str} is currently not supported.\n'
                    f'Please specify only one controller.')
            controller_name = controllers[0]
            if not down:
                # Stopping/autostopping a controller is not supported; only
                # termination (`sky down`) is allowed.
                raise click.UsageError(
                    f'{operation} controller(s) '
                    f'{controllers_str} is currently not supported.')
            else:
                controller = controller_utils.Controllers.from_name(
                    controller_name, expect_exact_match=False)
                assert controller is not None
                hint_or_raise = _controller_to_hint_or_raise(controller)
                try:
                    # TODO(zhwu): This hint or raise is not transactional, which
                    # means even if it passed the check with no in-progress spot
                    # or service and prompt the confirmation for termination,
                    # a user could still do a `sky jobs launch` or a
                    # `sky serve up` before typing the delete, causing a leaked
                    # managed job or service. We should make this check atomic
                    # with the termination.
                    hint_or_raise(controller_name, purge)
                except (exceptions.ClusterOwnerIdentityMismatchError,
                        exceptions.NotSupportedError, RuntimeError) as e:
                    # With --purge, pre-teardown check failures are reported
                    # but do not block the teardown.
                    if purge:
                        click.echo(common_utils.format_exception(e))
                    else:
                        raise
                if not purge:
                    # Tearing down a controller is destructive: require the
                    # user to type 'delete' rather than a simple y/n confirm.
                    confirm_str = 'delete'
                    user_input = click.prompt(
                        f'To proceed, please type {colorama.Style.BRIGHT}'
                        f'{confirm_str!r}{colorama.Style.RESET_ALL}',
                        type=str)
                    if user_input != confirm_str:
                        raise click.Abort()
                else:
                    click.echo('Since --purge is set, errors will be ignored '
                               'and controller will be removed from '
                               'local state.\nSkipping confirmation.')
                # Either confirmation path above already handled the prompt;
                # skip the generic confirmation further down.
                no_confirm = True
        # Re-append the (already validated) controllers so they are included
        # in the actual operation.
        names += controllers

    if apply_to_all or all_users:
        all_clusters = _get_cluster_records_and_set_ssh_config(
            clusters=None, all_users=all_users)
        if names:
            click.echo(
                f'Both --all and cluster(s) specified for `sky {command}`. '
                'Letting --all take effect.')
        # We should not remove controllers when --all is specified.
        # Otherwise, it would be very easy to accidentally delete a controller.

        # do not select already stopped clusters for stop command.
        # stopped clusters are still included for down or autostop commands.
        names = [
            record['name']
            for record in all_clusters
            if controller_utils.Controllers.from_name(
                record['name'], expect_exact_match=False) is None and
            (down or idle_minutes_to_autostop is not None or
             record['status'] != status_lib.ClusterStatus.STOPPED)
        ]

    clusters = names
    usage_lib.record_cluster_name_for_current_operation(clusters)

    if not clusters:
        click.echo('Cluster(s) not found (tip: see `sky status`).')
        return

    if not no_confirm and clusters:
        cluster_str = 'clusters' if len(clusters) > 1 else 'cluster'
        cluster_list = ', '.join(clusters)
        click.confirm(
            f'{operation} {len(clusters)} {cluster_str}: '
            f'{cluster_list}. Proceed?',
            default=True,
            abort=True,
            show_default=True)

    plural = 's' if len(clusters) > 1 else ''
    progress = rich_progress.Progress(transient=True,
                                      redirect_stdout=False,
                                      redirect_stderr=False)
    task = progress.add_task(
        f'[bold cyan]{operation} {len(clusters)} cluster{plural}[/]',
        total=len(clusters))

    # Request ids of the per-cluster SDK requests (useful with --async).
    request_ids = []

    # Outcome accumulators, filled in by the parallel workers below.
    successes: List[str] = []
    failures: List[Tuple[str, str]] = []

    def _down_or_stop(name: str):
        """Down/stop/autostop a single cluster and record the outcome.

        Appends to `successes`/`failures` and prints a per-cluster result
        message. Runs in a worker thread via run_in_parallel.
        """
        success_progress = False
        if idle_minutes_to_autostop is not None:
            try:
                request_id = sdk.autostop(name, idle_minutes_to_autostop,
                                          wait_for, down)
                request_ids.append(request_id)
                # Pause the progress bar so streamed request output renders
                # cleanly, then resume it afterwards.
                progress.stop()
                _async_call_or_wait(
                    request_id, async_call,
                    server_constants.REQUEST_NAME_PREFIX + operation)
                progress.start()
            except (exceptions.NotSupportedError, exceptions.ClusterNotUpError,
                    exceptions.CloudError) as e:
                message = str(e)
                failures.append((name, str(e)))
            else:  # no exception raised
                success_progress = True
                message = (f'{colorama.Fore.GREEN}{operation} '
                           f'cluster {name!r}...done{colorama.Style.RESET_ALL}')
                successes.append(name)
                if idle_minutes_to_autostop >= 0:
                    option_str = 'down' if down else 'stop'
                    passive_str = 'downed' if down else 'stopped'
                    plural = 's' if idle_minutes_to_autostop != 1 else ''
                    message += (
                        f'\n  The cluster will be auto{passive_str} after '
                        f'{idle_minutes_to_autostop} minute{plural} of '
                        'idleness.'
                        f'\n  To cancel the auto{option_str}, run: '
                        f'{colorama.Style.BRIGHT}'
                        f'sky autostop {name} --cancel'
                        f'{colorama.Style.RESET_ALL}')
        else:
            try:
                if down:
                    request_id = sdk.down(name, purge=purge)
                else:
                    request_id = sdk.stop(name, purge=purge)
                request_ids.append(request_id)
                progress.stop()
                _async_call_or_wait(
                    request_id, async_call,
                    server_constants.REQUEST_NAME_PREFIX + operation)
                progress.start()
                if not async_call:
                    # Remove the cluster from the SSH config file as soon as it
                    # is stopped or downed.
                    cluster_utils.SSHConfigHelper.remove_cluster(name)
            except RuntimeError as e:
                message = (
                    f'{colorama.Fore.RED}{operation} cluster {name}...failed. '
                    f'{colorama.Style.RESET_ALL}'
                    f'\nReason: {common_utils.format_exception(e)}.')
                failures.append((name, str(e)))
            except (exceptions.NotSupportedError,
                    exceptions.ClusterOwnerIdentityMismatchError,
                    exceptions.CloudError) as e:
                message = str(e)
                failures.append((name, str(e)))
            else:  # no exception raised
                message = (
                    f'{colorama.Fore.GREEN}{operation} cluster {name}...done.'
                    f'{colorama.Style.RESET_ALL}')
                successes.append(name)
                if not down:
                    message += ('\n  To restart the cluster, run: '
                                f'{colorama.Style.BRIGHT}sky start {name}'
                                f'{colorama.Style.RESET_ALL}')
                success_progress = True

        # Print the result message outside the live progress display.
        progress.stop()
        click.echo(message)
        if success_progress:
            progress.update(task, advance=1)
        progress.start()

    with progress:
        # we write a new line here to avoid the "Waiting for 'sky.down'
        # request to be scheduled" message from being printed on the same line
        # as the "Terminating <num> clusters..." message
        click.echo('')
        subprocess_utils.run_in_parallel(_down_or_stop, clusters)
        progress.live.transient = False
        # Make sure the progress bar not mess up the terminal.
        progress.refresh()

    if async_call:
        click.secho(f'{operation} requests are sent. Check the requests\' '
                    'status with `sky request get <request_id>`.')

    # Only print the succeeded/failed summary when more than one cluster was
    # operated on; for a single cluster the per-cluster message suffices.
    show_summary = len(clusters) > 1

    if show_summary:
        click.echo('\nSummary:')
        if successes:
            # Preserve the original order of clusters as provided by user.
            click.echo('  ✓ Succeeded: ' + ', '.join(successes))
        if failures:
            # Format failures: if one failure, keep on same line. If multiple,
            # indent each failed cluster on its own line for readability.
            if len(failures) == 1:
                name, reason = failures[0]
                first = reason.strip().splitlines()[0]
                first = first if len(first) <= 120 else first[:120] + '…'
                click.echo(f'  ✗ Failed: {name} ({first})')
            else:
                click.echo('  ✗ Failed:')
                for name, reason in failures:
                    first = reason.strip().splitlines()[0]
                    first = first if len(first) <= 120 else first[:120] + '…'
                    click.echo(f'      {name} ({first})')

    if failures:
        click.echo('Cluster(s) failed. See details above.')


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('infra_list', required=False, type=str, nargs=-1)
@flags.verbose_option('Show the activated account for each cloud.')
@click.option(
    '--workspace',
    '-w',
    type=str,
    help='The workspace to check. If None, all workspaces will be checked.')
@usage_lib.entrypoint
# pylint: disable=redefined-outer-name
def check(infra_list: Tuple[str],
          verbose: bool,
          workspace: Optional[str] = None):
    """Check which clouds are available to use.

    This checks access credentials for all clouds supported by SkyPilot. If a
    cloud is detected to be inaccessible, the reason and correction steps will
    be shown.

    If CLOUDS are specified, checks credentials for only those clouds.

    The enabled clouds are cached and form the "search space" to be considered
    for each task.

    Examples:

    .. code-block:: bash

      # Check credentials for all supported clouds.
      sky check
      \b
      # Check only specific clouds - AWS and GCP.
      sky check aws gcp
    """
    # An empty tuple means "check all clouds"; the SDK expects None for that.
    infra_arg = infra_list or None
    sdk.stream_and_get(
        sdk.check(infra_list=infra_arg, verbose=verbose, workspace=workspace))
    # Remind the user which API server the check was run against.
    api_server_url = server_common.get_server_url()
    click.echo()
    click.echo(
        click.style(f'Using SkyPilot API server: {api_server_url}', fg='green'))


@cli.command()
@flags.config_option(expose_value=False)
@click.argument('accelerator_str', required=False)
@flags.all_option('Show details of all GPU/TPU/accelerator offerings.')
@click.option('--infra',
              default=None,
              type=str,
              help='Infrastructure to query. Examples: "aws", "aws/us-east-1"')
@click.option('--cloud',
              default=None,
              type=str,
              help='Cloud provider to query.',
              hidden=True)
@click.option(
    '--region',
    required=False,
    type=str,
    help=
    ('The region to use. If not specified, shows accelerators from all regions.'
    ),
    hidden=True,
)
@click.option(
    '--all-regions',
    is_flag=True,
    default=False,
    help='Show pricing and instance details for a specified accelerator across '
    'all regions and clouds.')
@catalog.fallback_to_default_catalog
@usage_lib.entrypoint
def show_gpus(
        accelerator_str: Optional[str],
        all: bool,  # pylint: disable=redefined-builtin
        infra: Optional[str],
        cloud: Optional[str],
        region: Optional[str],
        all_regions: bool):
    """Show supported GPU/TPU/accelerators and their prices.

    The names and counts shown can be set in the ``accelerators`` field in task
    YAMLs, or in the ``--gpus`` flag in CLI commands. For example, if this
    table shows 8x V100s are supported, then the string ``V100:8`` will be
    accepted by the above.

    To show the detailed information of a GPU/TPU type (its price, which clouds
    offer it, the quantity in each VM type, etc.), use ``sky show-gpus <gpu>``.

    To show all accelerators, including less common ones and their detailed
    information, use ``sky show-gpus --all``.

    To show all regions for a specified accelerator, use
    ``sky show-gpus <accelerator> --all-regions``.

    If ``--region`` or ``--all-regions`` is not specified, the price displayed
    for each instance type is the lowest across all regions for both on-demand
    and spot instances. There may be multiple regions with the same lowest
    price.

    If ``--cloud kubernetes`` or ``--cloud k8s`` is specified, it will show the
    maximum quantities of the GPU available on a single node and the real-time
    availability of the GPU across all nodes in the Kubernetes cluster.

    If ``--cloud slurm`` is specified, it will show the maximum quantities of
    the GPU available on a single node and the real-time availability of the
    GPU across all nodes in the Slurm cluster.

    Definitions of certain fields:

    * ``DEVICE_MEM``: Memory of a single device; does not depend on the device
      count of the instance (VM).

    * ``HOST_MEM``: Memory of the host instance (VM).

    * ``QTY_PER_NODE`` (Kubernetes only): GPU quantities that can be requested
      on a single node.

    * ``UTILIZATION`` (Kubernetes only): Total number of GPUs free / available
      in the Kubernetes cluster.
    """
    cloud, region, _ = _handle_infra_cloud_region_zone_options(infra,
                                                               cloud,
                                                               region,
                                                               zone=None)

    # cloud and region could be '*' from _handle_infra_cloud_region_zone_options
    # which normally indicates to
    # _make_task_or_dag_from_entrypoint_with_overrides -> _parse_override_params
    # to disregard the cloud and region from the YAML.
    # In show_gpus, there is no YAML, so we need to handle the '*' value
    # directly here. We should use None instead to indicate "any".
    if cloud == '*':
        cloud = None
    if region == '*':
        region = None

    # validation for the --region flag
    if region is not None and cloud is None:
        raise click.UsageError(
            'The --region flag is only valid when the --cloud flag is set.')

    # validation for the --all-regions flag
    if all_regions and accelerator_str is None:
        raise click.UsageError(
            'The --all-regions flag is only valid when an accelerator '
            'is specified.')
    if all_regions and region is not None:
        raise click.UsageError(
            '--all-regions and --region flags cannot be used simultaneously.')

    # This will validate 'cloud' and raise if not found.
    cloud_obj = registry.CLOUD_REGISTRY.from_str(cloud)
    cloud_name = str(cloud_obj).lower() if cloud is not None else None
    show_all = all
    if show_all and accelerator_str is not None:
        raise click.UsageError('--all is only allowed without a GPU name.')

    # Kubernetes specific bools
    enabled_clouds = sdk.get(sdk.enabled_clouds())
    cloud_is_kubernetes = isinstance(
        cloud_obj, clouds.Kubernetes) and not isinstance(cloud_obj, clouds.SSH)
    cloud_is_ssh = isinstance(cloud_obj, clouds.SSH)
    cloud_is_slurm = isinstance(cloud_obj, clouds.Slurm)

    # TODO(romilb): We should move this to the backend.
    kubernetes_autoscaling = skypilot_config.get_effective_region_config(
        cloud='kubernetes',
        region=region,
        keys=('autoscaler',),
        default_value=None) is not None
    kubernetes_is_enabled = clouds.Kubernetes.canonical_name() in enabled_clouds
    ssh_is_enabled = clouds.SSH.canonical_name() in enabled_clouds
    slurm_is_enabled = clouds.Slurm.canonical_name() in enabled_clouds
    query_k8s_realtime_gpu = (kubernetes_is_enabled and
                              (cloud_name is None or cloud_is_kubernetes))
    query_ssh_realtime_gpu = (ssh_is_enabled and
                              (cloud_name is None or cloud_is_ssh))

    def _list_to_str(lst):

        def format_number(n):
            # If it's a float that's a whole number, display as int
            if isinstance(n, float) and n.is_integer():
                return str(int(n))
            return str(n)

        return ', '.join([format_number(n) for n in lst])

    # TODO(zhwu,romilb): We should move most of these kubernetes related
    # queries into the backend, especially behind the server.
    def _get_kubernetes_realtime_gpu_tables(
        context: Optional[str] = None,
        name_filter: Optional[str] = None,
        quantity_filter: Optional[int] = None,
        is_ssh: bool = False,
    ) -> Tuple[List[Tuple[str, 'prettytable.PrettyTable']],
               Optional['prettytable.PrettyTable'], List[Tuple[
                   str, 'models.KubernetesNodesInfo']]]:
        if quantity_filter:
            qty_header = 'QTY_FILTER'
        else:
            qty_header = 'REQUESTABLE_QTY_PER_NODE'

        realtime_gpu_availability_lists = sdk.stream_and_get(
            sdk.realtime_kubernetes_gpu_availability(
                context=context,
                name_filter=name_filter,
                quantity_filter=quantity_filter,
                is_ssh=is_ssh))
        if not realtime_gpu_availability_lists:
            # Customize message based on context
            identity = ('SSH Node Pool'
                        if is_ssh else 'any allowed Kubernetes cluster')
            cloud_name = 'ssh' if is_ssh else 'kubernetes'
            err_msg = f'No GPUs found in {identity}. '
            debug_msg = (f'To further debug, run: sky check {cloud_name}')
            if name_filter is not None:
                gpu_info_msg = f' {name_filter!r}'
                if quantity_filter is not None:
                    gpu_info_msg += (' with requested quantity'
                                     f' {quantity_filter}')
                err_msg = (f'Resources{gpu_info_msg} not found '
                           f'in {identity}. ')
                identity_short = 'SSH Node Pool' if is_ssh else 'Kubernetes'
                debug_msg = (
                    f'To show available accelerators in {identity_short}, '
                    f'run: sky show-gpus --cloud {cloud_name}')
            full_err_msg = (err_msg + kubernetes_constants.NO_GPU_HELP_MESSAGE +
                            debug_msg)
            raise ValueError(full_err_msg)
        no_permissions_str = '<no permissions>'
        realtime_gpu_infos = []
        total_gpu_info: Dict[str, List[int]] = collections.defaultdict(
            lambda: [0, 0])
        all_nodes_info = []

        # display an aggregated table for all contexts
        # if there are more than one contexts with GPUs.
        def _filter_ctx(ctx: str) -> bool:
            ctx_is_ssh = ctx and ctx.startswith('ssh-')
            return ctx_is_ssh is is_ssh

        num_filtered_contexts = 0

        if realtime_gpu_availability_lists:
            for (ctx, availability_list) in realtime_gpu_availability_lists:
                if not _filter_ctx(ctx):
                    continue
                if is_ssh:
                    display_ctx = common_utils.removeprefix(ctx, 'ssh-')
                else:
                    display_ctx = ctx
                num_filtered_contexts += 1
                realtime_gpu_table = log_utils.create_table(
                    ['GPU', qty_header, 'UTILIZATION'])
                for realtime_gpu_availability in sorted(availability_list):
                    gpu_availability = models.RealtimeGpuAvailability(
                        *realtime_gpu_availability)
                    available_qty = (gpu_availability.available
                                     if gpu_availability.available != -1 else
                                     no_permissions_str)
                    realtime_gpu_table.add_row([
                        gpu_availability.gpu,
                        _list_to_str(gpu_availability.counts),
                        f'{available_qty} of {gpu_availability.capacity} free',
                    ])
                    gpu = gpu_availability.gpu
                    capacity = gpu_availability.capacity
                    # we want total, so skip permission denied.
                    available = max(gpu_availability.available, 0)
                    if capacity > 0:
                        total_gpu_info[gpu][0] += capacity
                        total_gpu_info[gpu][1] += available
                realtime_gpu_infos.append((display_ctx, realtime_gpu_table))
                # Collect node info for this context
                nodes_info = sdk.stream_and_get(
                    sdk.kubernetes_node_info(context=ctx))
                all_nodes_info.append((display_ctx, nodes_info))
        if num_filtered_contexts > 1:
            total_realtime_gpu_table = log_utils.create_table(
                ['GPU', 'UTILIZATION'])
            for gpu, stats in total_gpu_info.items():
                total_realtime_gpu_table.add_row(
                    [gpu, f'{stats[1]} of {stats[0]} free'])
        else:
            total_realtime_gpu_table = None

        return realtime_gpu_infos, total_realtime_gpu_table, all_nodes_info

    def _get_slurm_realtime_gpu_tables(
        name_filter: Optional[str] = None,
        quantity_filter: Optional[int] = None
    ) -> Tuple[List[Tuple[str, 'prettytable.PrettyTable']],
               Optional['prettytable.PrettyTable']]:
        """Get Slurm GPU availability tables.

        Args:
            name_filter: Filter GPUs by name.
            quantity_filter: Filter GPUs by quantity.

        Returns:
            A tuple of (realtime_gpu_infos, total_realtime_gpu_table).
        """
        if quantity_filter:
            qty_header = 'QTY_FILTER'
        else:
            qty_header = 'REQUESTABLE_QTY_PER_NODE'

        realtime_gpu_availability_lists = sdk.stream_and_get(
            sdk.realtime_slurm_gpu_availability(
                name_filter=name_filter, quantity_filter=quantity_filter))
        if not realtime_gpu_availability_lists:
            err_msg = 'No GPUs found in any Slurm partition. '
            debug_msg = 'To further debug, run: sky check slurm '
            if name_filter is not None:
                gpu_info_msg = f' {name_filter!r}'
                if quantity_filter is not None:
                    gpu_info_msg += (' with requested quantity'
                                     f' {quantity_filter}')
                err_msg = (f'Resources{gpu_info_msg} not found '
                           'in any Slurm partition. ')
                debug_msg = ('To show available accelerators on Slurm,'
                             ' run: sky show-gpus --cloud slurm ')
            raise ValueError(err_msg + debug_msg)

        realtime_gpu_infos = []
        total_gpu_info: Dict[str, List[int]] = collections.defaultdict(
            lambda: [0, 0])

        for (slurm_cluster,
             availability_list) in realtime_gpu_availability_lists:
            realtime_gpu_table = log_utils.create_table(
                ['GPU', qty_header, 'UTILIZATION'])
            for realtime_gpu_availability in sorted(availability_list):
                gpu_availability = models.RealtimeGpuAvailability(
                    *realtime_gpu_availability)
                # Use the counts directly from the backend, which are already
                # generated in powers of 2 (plus any actual maximums)
                requestable_quantities = gpu_availability.counts
                realtime_gpu_table.add_row([
                    gpu_availability.gpu,
                    _list_to_str(requestable_quantities),
                    (f'{gpu_availability.available} of '
                     f'{gpu_availability.capacity} free'),
                ])
                gpu = gpu_availability.gpu
                capacity = gpu_availability.capacity
                available = gpu_availability.available
                if capacity > 0:
                    total_gpu_info[gpu][0] += capacity
                    total_gpu_info[gpu][1] += available
            realtime_gpu_infos.append((slurm_cluster, realtime_gpu_table))

        # display an aggregated table for all partitions
        # if there are more than one partitions with GPUs
        if len(realtime_gpu_infos) > 1:
            total_realtime_gpu_table = log_utils.create_table(
                ['GPU', 'UTILIZATION'])
            for gpu, stats in total_gpu_info.items():
                total_realtime_gpu_table.add_row(
                    [gpu, f'{stats[1]} of {stats[0]} free'])
        else:
            total_realtime_gpu_table = None

        return realtime_gpu_infos, total_realtime_gpu_table

    def _format_kubernetes_node_info_combined(
            contexts_info: List[Tuple[str, 'models.KubernetesNodesInfo']],
            cloud_str: str = 'Kubernetes',
            context_title_str: str = 'CONTEXT') -> str:
        node_table = log_utils.create_table(
            [context_title_str, 'NODE', 'GPU', 'UTILIZATION'])

        no_permissions_str = '<no permissions>'
        hints = []

        for context, nodes_info in contexts_info:
            context_name = context if context else 'default'
            if nodes_info.hint:
                hints.append(f'{context_name}: {nodes_info.hint}')

            for node_name, node_info in nodes_info.node_info_dict.items():
                available = node_info.free[
                    'accelerators_available'] if node_info.free[
                        'accelerators_available'] != -1 else no_permissions_str
                acc_type = node_info.accelerator_type
                if acc_type is None:
                    acc_type = '-'
                node_table.add_row([
                    context_name, node_name, acc_type,
                    f'{available} of {node_info.total["accelerator_count"]} '
                    'free'
                ])

        k8s_per_node_acc_message = (f'{cloud_str} per-node GPU availability')
        if hints:
            k8s_per_node_acc_message += ' (' + '; '.join(hints) + ')'

        return (f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                f'{k8s_per_node_acc_message}'
                f'{colorama.Style.RESET_ALL}\n'
                f'{node_table.get_string()}')

    def _format_slurm_node_info() -> str:
        node_table = log_utils.create_table([
            'CLUSTER',
            'NODE',
            'PARTITION',
            'STATE',
            'GPU',
            'UTILIZATION',
        ])

        # Get all cluster names
        slurm_cluster_names = clouds.Slurm.existing_allowed_clusters()

        # Query each cluster
        for cluster_name in slurm_cluster_names:
            nodes_info = sdk.stream_and_get(
                sdk.slurm_node_info(slurm_cluster_name=cluster_name))

            for node_info in nodes_info:
                node_table.add_row([
                    cluster_name,
                    node_info.get('node_name'),
                    node_info.get('partition', '-'),
                    node_info.get('node_state'),
                    node_info.get('gpu_type') or '',
                    (f'{node_info.get("free_gpus", 0)} of '
                     f'{node_info.get("total_gpus", 0)} free'),
                ])

        slurm_per_node_msg = 'Slurm per node accelerator availability'
        # Optional: Add hint message if needed, similar to k8s

        return (f'{colorama.Fore.LIGHTMAGENTA_EX}{colorama.Style.NORMAL}'
                f'{slurm_per_node_msg}'
                f'{colorama.Style.RESET_ALL}\n'
                f'{node_table.get_string()}')

    def _format_kubernetes_realtime_gpu(
            total_table: Optional['prettytable.PrettyTable'],
            k8s_realtime_infos: List[Tuple[str, 'prettytable.PrettyTable']],
            all_nodes_info: List[Tuple[str, 'models.KubernetesNodesInfo']],
            show_node_info: bool, is_ssh: bool) -> Generator[str, None, None]:
        """Yield the realtime GPU report for Kubernetes or SSH node pools.

        Table strings are streamed via ``yield from`` (i.e. piecewise), so
        the caller can echo chunks as they arrive.
        """
        identity = 'SSH Node Pool' if is_ssh else 'Kubernetes'
        yield (f'{colorama.Fore.GREEN}{colorama.Style.BRIGHT}'
               f'{identity} GPUs'
               f'{colorama.Style.RESET_ALL}')
        # Aggregate availability across all contexts/node pools, if present.
        if total_table is not None:
            yield '\n'
            yield from total_table.get_string()

        ctx_name = 'SSH Node Pool' if is_ssh else 'Context'
        ctx_column_title = 'NODE_POOL' if is_ssh else 'CONTEXT'

        # One table per context (or node pool), each with its own header.
        for ctx, per_ctx_table in k8s_realtime_infos:
            yield '\n'
            if ctx:
                header = f'{ctx_name}: {ctx}'
            else:
                header = f'Default {ctx_name}'
            yield (
                f'{colorama.Fore.CYAN}{header}{colorama.Style.RESET_ALL}\n'
            )
            yield from per_ctx_table.get_string()

        if show_node_info:
            yield '\n'
            yield _format_kubernetes_node_info_combined(all_nodes_info,
                                                        identity,
                                                        ctx_column_title)

    def _possibly_show_k8s_like_realtime(
            is_ssh: bool = False
    ) -> Generator[str, None, Tuple[bool, bool, str]]:
        """Yield realtime GPU availability for Kubernetes (or SSH node pools).

        Returns (as the generator's StopIteration value) a tuple of:
          - stop_iter: True when the user explicitly targeted this cloud,
            so the caller should stop producing further output.
          - print_section_titles: whether a section was actually printed.
          - k8s_messages: informational messages deferred to the end.
        """
        # If cloud is kubernetes, we want to show real-time capacity
        k8s_messages = ''
        print_section_titles = False
        # NOTE(review): `and` binds tighter than `or`, so this evaluates as
        # `(is_ssh and query_ssh_realtime_gpu) or query_k8s_realtime_gpu`.
        # With is_ssh=True, the k8s flag alone can still trigger the query —
        # confirm this precedence is intended.
        if (is_ssh and query_ssh_realtime_gpu or query_k8s_realtime_gpu):
            context = region

            try:
                # If --cloud kubernetes is not specified, we want to catch
                # the case where no GPUs are available on the cluster and
                # print the warning at the end.
                k8s_realtime_infos, total_table, all_nodes_info = (
                    _get_kubernetes_realtime_gpu_tables(context, is_ssh=is_ssh))
            except ValueError as e:
                if not (cloud_is_kubernetes or cloud_is_ssh):
                    # Make it a note if cloud is not kubernetes
                    k8s_messages += 'Note: '
                k8s_messages += str(e)
            else:
                print_section_titles = True

                yield from _format_kubernetes_realtime_gpu(total_table,
                                                           k8s_realtime_infos,
                                                           all_nodes_info,
                                                           show_node_info=True,
                                                           is_ssh=is_ssh)

            if kubernetes_autoscaling:
                k8s_messages += ('\n' +
                                 kubernetes_utils.KUBERNETES_AUTOSCALER_NOTE)
        # When the user explicitly asked for this cloud, flush messages now
        # and signal the caller to stop (stop_iter=True).
        if is_ssh:
            if cloud_is_ssh:
                if not ssh_is_enabled:
                    yield ('SSH Node Pools are not enabled. To fix, run: '
                           'sky check ssh ')
                yield k8s_messages
                return True, print_section_titles, ''
        else:
            if cloud_is_kubernetes:
                if not kubernetes_is_enabled:
                    yield ('Kubernetes is not enabled. To fix, run: '
                           'sky check kubernetes ')
                yield k8s_messages
                return True, print_section_titles, ''
        return False, print_section_titles, k8s_messages

    def _possibly_show_k8s_like_realtime_for_acc(
            name: Optional[str],
            quantity: Optional[int],
            is_ssh: bool = False) -> Generator[str, None, Tuple[bool, bool]]:
        """Yield realtime k8s/SSH availability filtered to one accelerator.

        Returns (as the generator's StopIteration value) a tuple of:
          - stop_iter: True when the user explicitly targeted this cloud,
            so the caller should stop producing further output.
          - print_section_titles: whether a section was actually printed.
        """
        k8s_messages = ''
        print_section_titles = False
        # NOTE(review): `and` binds tighter than `or`, so this evaluates as
        # `((is_ssh and query_ssh_realtime_gpu) or query_k8s_realtime_gpu)
        # and not show_all`. With is_ssh=True, the k8s flag alone can still
        # trigger the query — confirm this precedence is intended.
        if (is_ssh and query_ssh_realtime_gpu or
                query_k8s_realtime_gpu) and not show_all:
            print_section_titles = True
            # TODO(romilb): Show filtered per node GPU availability here as well
            try:
                (k8s_realtime_infos, total_table,
                 all_nodes_info) = _get_kubernetes_realtime_gpu_tables(
                     context=region,
                     name_filter=name,
                     quantity_filter=quantity,
                     is_ssh=is_ssh)

                yield from _format_kubernetes_realtime_gpu(total_table,
                                                           k8s_realtime_infos,
                                                           all_nodes_info,
                                                           show_node_info=False,
                                                           is_ssh=is_ssh)
            except ValueError as e:
                # In the case of a specific accelerator, show the error message
                # immediately (e.g., "Resources H100 not found ...")
                yield common_utils.format_exception(e, use_bracket=True)
            if kubernetes_autoscaling:
                k8s_messages += ('\n' +
                                 kubernetes_utils.KUBERNETES_AUTOSCALER_NOTE)
            yield k8s_messages
        # When the user explicitly asked for this cloud, signal the caller
        # to stop (stop_iter=True).
        if is_ssh:
            if cloud_is_ssh:
                if not ssh_is_enabled:
                    yield ('SSH Node Pools are not enabled. To fix, run: '
                           'sky check ssh ')
                return True, print_section_titles
        else:
            if cloud_is_kubernetes:
                if not kubernetes_is_enabled:
                    yield ('Kubernetes is not enabled. To fix, run: '
                           'sky check kubernetes ')
                return True, print_section_titles
        return False, print_section_titles

    def _format_slurm_realtime_gpu(
            total_table, slurm_realtime_infos,
            show_node_info: bool) -> Generator[str, None, None]:
        """Yield the realtime Slurm GPU availability report.

        Emits a section header, the aggregate table (if any), then one table
        per Slurm cluster; optionally appends the per-node breakdown.
        """
        yield (f'{colorama.Fore.GREEN}{colorama.Style.BRIGHT}'
               'Slurm GPUs'
               f'{colorama.Style.RESET_ALL}\n')
        if total_table is not None:
            yield from total_table.get_string()
            yield '\n'

        # One table per Slurm cluster, each preceded by a colored header.
        for cluster, cluster_table in slurm_realtime_infos:
            yield (f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                   f'Slurm Cluster: {cluster}'
                   f'{colorama.Style.RESET_ALL}\n')
            yield from cluster_table.get_string()
            yield '\n'
        if show_node_info:
            yield _format_slurm_node_info()

    def _output() -> Generator[str, None, None]:
        """Yield the `sky show-gpus` report as a stream of text chunks.

        With no accelerator filter: shows Kubernetes/SSH realtime GPU
        availability, Slurm realtime availability, then the common-GPU /
        TPU / other-accelerator tables from the cloud catalogs.  With an
        accelerator filter (`NAME[:QTY]`): shows only matching entries.
        Chunks carry their own newlines; the caller prints them with
        nl=False or pipes them to a pager.
        """
        gpu_table = log_utils.create_table(
            ['COMMON_GPU', 'AVAILABLE_QUANTITIES'])
        tpu_table = log_utils.create_table(
            ['GOOGLE_TPU', 'AVAILABLE_QUANTITIES'])
        other_table = log_utils.create_table(
            ['OTHER_GPU', 'AVAILABLE_QUANTITIES'])

        # Accelerator name/quantity filter; parsed from accelerator_str below.
        name, quantity = None, None

        # Optimization - do not poll for Kubernetes API for fetching
        # common GPUs because that will be fetched later for the table after
        # common GPUs.
        clouds_to_list: Union[Optional[str], List[str]] = cloud_name
        if cloud_name is None:
            clouds_to_list = [
                c for c in constants.ALL_CLOUDS
                if c != 'kubernetes' and c != 'ssh' and c != 'slurm'
            ]

        k8s_messages = ''
        slurm_messages = ''
        k8s_printed = False
        if accelerator_str is None:
            # Collect k8s related messages in k8s_messages and print them at end
            print_section_titles = False
            stop_iter = False
            k8s_messages = ''
            prev_print_section_titles = False
            # Show Kubernetes first (is_ssh=False), then SSH node pools.
            for is_ssh in [False, True]:
                if prev_print_section_titles:
                    yield '\n\n'
                stop_iter_one, print_section_titles_one, k8s_messages_one = (
                    yield from _possibly_show_k8s_like_realtime(is_ssh))
                k8s_printed = True
                stop_iter = stop_iter or stop_iter_one
                print_section_titles = (print_section_titles or
                                        print_section_titles_one)
                k8s_messages += k8s_messages_one
                prev_print_section_titles = print_section_titles_one
            # stop_iter=True means the user explicitly asked for k8s/SSH
            # only, so skip the cloud catalog output entirely.
            if stop_iter:
                return
            # If cloud is slurm, we want to show real-time capacity
            if slurm_is_enabled and (cloud_name is None or cloud_is_slurm):
                try:
                    # If --cloud slurm is not specified, we want to catch
                    # the case where no GPUs are available on the cluster and
                    # print the warning at the end.
                    slurm_realtime_infos, total_table = (
                        _get_slurm_realtime_gpu_tables())
                except ValueError as e:
                    if not cloud_is_slurm:
                        # Make it a note if cloud is not slurm
                        slurm_messages += 'Note: '
                    slurm_messages += str(e)
                else:
                    print_section_titles = True
                    if k8s_printed:
                        yield '\n'

                    yield from _format_slurm_realtime_gpu(total_table,
                                                          slurm_realtime_infos,
                                                          show_node_info=True)

            if cloud_is_slurm:
                # Do not show clouds if --cloud slurm is specified
                if not slurm_is_enabled:
                    yield ('Slurm is not enabled. To fix, run: '
                           'sky check slurm ')
                yield slurm_messages
                return

            # For show_all, show the k8s message at the start since output is
            # long and the user may not scroll to the end.
            if show_all and (k8s_messages or slurm_messages):
                if k8s_messages:
                    yield k8s_messages
                if slurm_messages:
                    if k8s_messages:
                        yield '\n'
                    yield slurm_messages
                yield '\n\n'

            list_accelerator_counts_result = sdk.stream_and_get(
                sdk.list_accelerator_counts(
                    gpus_only=True,
                    clouds=clouds_to_list,
                    region_filter=region,
                ))
            # TODO(zhwu): handle the case where no accelerators are found,
            # especially when --region specified a non-existent region.

            if print_section_titles:
                # If section titles were printed above, print again here
                yield '\n\n'
                yield (f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                       f'Cloud GPUs{colorama.Style.RESET_ALL}\n')

            # "Common" GPUs
            # Entries are pop()-ed so that anything left over afterwards
            # lands in the "other" table below.
            for gpu in catalog.get_common_gpus():
                if gpu in list_accelerator_counts_result:
                    gpu_table.add_row([
                        gpu,
                        _list_to_str(list_accelerator_counts_result.pop(gpu))
                    ])
            yield from gpu_table.get_string()

            # Google TPUs
            for tpu in catalog.get_tpus():
                if tpu in list_accelerator_counts_result:
                    tpu_table.add_row([
                        tpu,
                        _list_to_str(list_accelerator_counts_result.pop(tpu))
                    ])
            # NOTE(review): get_string() includes the header row, so it is
            # truthy even when no TPU rows were added — this branch always
            # executes; confirm whether a row-count check was intended.
            if tpu_table.get_string():
                yield '\n\n'
            yield from tpu_table.get_string()

            # Other GPUs
            if show_all:
                yield '\n\n'
                for gpu, qty in sorted(list_accelerator_counts_result.items()):
                    other_table.add_row([gpu, _list_to_str(qty)])
                yield from other_table.get_string()
                yield '\n\n'
            else:
                yield ('\n\nHint: use -a/--all to see all accelerators '
                       '(including non-common ones) and pricing.')
                if k8s_messages or slurm_messages:
                    yield '\n'
                    yield k8s_messages
                    yield slurm_messages
                return
        else:
            # Parse accelerator string of the form NAME[:QUANTITY].
            accelerator_split = accelerator_str.split(':')
            if len(accelerator_split) > 2:
                raise click.UsageError(
                    f'Invalid accelerator string {accelerator_str}. '
                    'Expected format: <accelerator_name>[:<quantity>].')
            if len(accelerator_split) == 2:
                name = accelerator_split[0]
                # Check if quantity is valid
                try:
                    quantity = int(accelerator_split[1])
                    if quantity <= 0:
                        raise ValueError(
                            'Quantity cannot be non-positive integer.')
                except ValueError as invalid_quantity:
                    raise click.UsageError(
                        f'Invalid accelerator quantity {accelerator_split[1]}. '
                        'Expected a positive integer.') from invalid_quantity
            else:
                name, quantity = accelerator_str, None

        # From here on, a specific accelerator (or --all) is being queried.
        print_section_titles = False
        stop_iter = False
        prev_print_section_titles = False
        for is_ssh in [False, True]:
            if prev_print_section_titles:
                yield '\n\n'
            stop_iter_one, print_section_titles_one = (
                yield from _possibly_show_k8s_like_realtime_for_acc(
                    name, quantity, is_ssh))
            stop_iter = stop_iter or stop_iter_one
            print_section_titles = (print_section_titles or
                                    print_section_titles_one)
            prev_print_section_titles = print_section_titles_one
        if stop_iter:
            return

        # Handle Slurm filtering by name and quantity
        if (slurm_is_enabled and (cloud_name is None or cloud_is_slurm) and
                not show_all):
            # Print section title if not showing all and instead a specific
            # accelerator is requested
            print_section_titles = True
            try:
                slurm_realtime_infos, total_table = (
                    _get_slurm_realtime_gpu_tables(name_filter=name,
                                                   quantity_filter=quantity))

                yield from _format_slurm_realtime_gpu(total_table,
                                                      slurm_realtime_infos,
                                                      show_node_info=False)
            except ValueError as e:
                # In the case of a specific accelerator, show the error message
                # immediately (e.g., "Resources A10G not found ...")
                yield str(e)
            yield slurm_messages
        if cloud_is_slurm:
            # Do not show clouds if --cloud slurm is specified
            if not slurm_is_enabled:
                yield ('Slurm is not enabled. To fix, run: '
                       'sky check slurm ')
            return
        # For clouds other than Kubernetes, get the accelerator details.
        # Name matching is case-insensitive (case_sensitive=False below).
        list_accelerators_result = sdk.stream_and_get(
            sdk.list_accelerators(gpus_only=True,
                                  name_filter=name,
                                  quantity_filter=quantity,
                                  region_filter=region,
                                  clouds=clouds_to_list,
                                  case_sensitive=False,
                                  all_regions=all_regions))
        # Import here to save module load speed.
        # pylint: disable=import-outside-toplevel,line-too-long
        from sky.catalog import common as catalog_common

        # For each gpu name (count not included):
        #   - Group by cloud
        #   - Sort within each group by prices
        #   - Sort groups by each cloud's (min price, min spot price)
        new_result: Dict[str, List[catalog_common.InstanceTypeInfo]] = {}
        # NOTE(review): `i` is unused in this first loop.
        for i, (gpu, items) in enumerate(list_accelerators_result.items()):
            df = pd.DataFrame([t._asdict() for t in items])
            # Determine the minimum prices for each cloud.
            min_price_df = df.groupby('cloud').agg(min_price=('price', 'min'),
                                                   min_spot_price=('spot_price',
                                                                   'min'))
            df = df.merge(min_price_df, on='cloud')
            # Sort within each cloud by price.
            df = df.groupby('cloud', group_keys=False).apply(
                lambda x: x.sort_values(by=['price', 'spot_price']))
            # Sort across groups (clouds).
            df = df.sort_values(by=['min_price', 'min_spot_price'])
            df = df.drop(columns=['min_price', 'min_spot_price'])
            sorted_dataclasses = [
                catalog_common.InstanceTypeInfo(*row)
                for row in df.to_records(index=False)
            ]
            new_result[gpu] = sorted_dataclasses
        list_accelerators_result = new_result

        if print_section_titles and not show_all:
            yield '\n\n'
            yield (f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                   f'Cloud GPUs{colorama.Style.RESET_ALL}\n')

        if not list_accelerators_result:
            quantity_str = (f' with requested quantity {quantity}'
                            if quantity else '')
            cloud_str = f' on {cloud_obj}.' if cloud_name else ' in cloud catalogs.'
            yield f'Resources \'{name}\'{quantity_str} not found{cloud_str} '
            yield 'To show available accelerators, run: sky show-gpus --all'
            return

        # One table per accelerator name; separated by blank lines.
        for i, (gpu, items) in enumerate(list_accelerators_result.items()):
            accelerator_table_headers = [
                'GPU',
                'QTY',
                'CLOUD',
                'INSTANCE_TYPE',
                'DEVICE_MEM',
                'vCPUs',
                'HOST_MEM',
                'HOURLY_PRICE',
                'HOURLY_SPOT_PRICE',
            ]
            if not show_all:
                accelerator_table_headers.append('REGION')
            accelerator_table = log_utils.create_table(
                accelerator_table_headers)
            for item in items:
                # Catalog fields may be NaN; render '-' (or '(attachable)'
                # for the instance type) in that case.
                instance_type_str = item.instance_type if not pd.isna(
                    item.instance_type) else '(attachable)'
                cpu_count = item.cpu_count
                if not pd.isna(cpu_count) and isinstance(
                        cpu_count, (float, int)):
                    if int(cpu_count) == cpu_count:
                        cpu_str = str(int(cpu_count))
                    else:
                        cpu_str = f'{cpu_count:.1f}'
                else:
                    cpu_str = '-'
                device_memory_str = (f'{item.device_memory:.0f}GB' if
                                     not pd.isna(item.device_memory) else '-')
                host_memory_str = f'{item.memory:.0f}GB' if not pd.isna(
                    item.memory) else '-'
                price_str = f'$ {item.price:.3f}' if not pd.isna(
                    item.price) else '-'
                spot_price_str = f'$ {item.spot_price:.3f}' if not pd.isna(
                    item.spot_price) else '-'
                region_str = item.region if not pd.isna(item.region) else '-'
                accelerator_table_vals = [
                    item.accelerator_name,
                    item.accelerator_count,
                    item.cloud,
                    instance_type_str,
                    device_memory_str,
                    cpu_str,
                    host_memory_str,
                    price_str,
                    spot_price_str,
                ]
                if not show_all:
                    accelerator_table_vals.append(region_str)
                accelerator_table.add_row(accelerator_table_vals)

            if i != 0:
                yield '\n\n'
            yield from accelerator_table.get_string()

    # Stream the generated report: page it for the long --all output,
    # otherwise echo chunks directly.  Chunks carry their own newlines,
    # hence nl=False; the final echo() adds the trailing newline.
    outputs = _output()
    if show_all:
        click.echo_via_pager(outputs)
    else:
        for out in outputs:
            click.echo(out, nl=False)
        click.echo()


@cli.group(cls=_NaturalOrderGroup)
def storage():
    """SkyPilot Storage CLI."""
    # Container group for `sky storage ...` subcommands; the group itself
    # performs no work.
    pass


@storage.command('ls', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@flags.verbose_option()
@usage_lib.entrypoint
def storage_ls(verbose: bool):
    """List storage objects managed by SkyPilot."""
    # Fetch the storage list from the API server and render it as a table;
    # --verbose shows all columns.
    storages = sdk.stream_and_get(sdk.storage_ls())
    click.echo(table_utils.format_storage_table(storages, show_all=verbose))


@storage.command('delete', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('names',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_storage_name))
@flags.all_option('Delete all storage objects.')
@click.option('--yes',
              '-y',
              default=False,
              is_flag=True,
              required=False,
              help='Skip confirmation prompt.')
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def storage_delete(names: List[str], all: bool, yes: bool, async_call: bool):  # pylint: disable=redefined-builtin
    """Delete storage objects.

    Examples:

    .. code-block:: bash

        # Delete two storage objects.
        sky storage delete imagenet cifar10
        \b
        # Delete all storage objects matching glob pattern 'imagenet*'.
        sky storage delete "imagenet*"
        \b
        # Delete all storage objects.
        sky storage delete -a
    """
    if sum([bool(names), all]) != 1:
        raise click.UsageError('Either --all or a name must be specified.')
    # Fetch the existing storages once; it is needed both for --all and for
    # glob expansion (mirrors the structure of `volumes_delete`).
    storages = sdk.get(sdk.storage_ls())
    if all:
        if not storages:
            click.echo('No storage(s) to delete.')
            return
        names = [storage['name'] for storage in storages]
    else:
        existing_storage_names = [storage['name'] for storage in storages]
        names = _get_glob_matches(existing_storage_names, names)
    if names:
        if not yes:
            storage_names = ', '.join(names)
            storage_str = 'storages' if len(names) > 1 else 'storage'
            click.confirm(
                f'Deleting {len(names)} {storage_str}: '
                f'{storage_names}. Proceed?',
                default=True,
                abort=True,
                show_default=True)

    request_ids = {}
    # TODO(zhwu): Support all flag for the underlying SDK and API server to
    # avoid multiple requests.
    for name in names:
        request_ids[name] = sdk.storage_delete(name)

    for name, request_id in request_ids.items():
        try:
            _async_call_or_wait(request_id, async_call, 'sky.storage')
        except Exception as e:  # pylint: disable=broad-except
            # Keep deleting the remaining storages even if one fails.
            logger.error(f'{colorama.Fore.RED}Error deleting storage {name}: '
                         f'{common_utils.format_exception(e, use_bracket=True)}'
                         f'{colorama.Style.RESET_ALL}')


@cli.group(cls=_NaturalOrderGroup)
def volumes():
    """SkyPilot Volumes CLI."""
    # Container group for `sky volumes ...` subcommands; the group itself
    # performs no work.
    pass


# Add 'volume' as an alias for 'volumes'
cli.add_command(volumes, name='volume')


@volumes.command('apply', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('entrypoint',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
@click.option('--name',
              '-n',
              required=False,
              type=str,
              help='Volume name. Override the name defined in the YAML.')
@click.option('--infra',
              required=False,
              type=str,
              help='Infrastructure to use. '
              'Format: cloud, cloud/region, cloud/region/zone, or '
              # Trailing space added so the rendered help text does not run
              # 'context-name.' and 'Examples:' together.
              'k8s/context-name. '
              'Examples: k8s, k8s/my-context, runpod/US/US-CA-2. '
              'Override the infra defined in the YAML.')
@click.option('--type',
              required=False,
              type=click.Choice(volume_utils.VolumeType.supported_types()),
              help='Volume type. Override the type defined in the YAML.')
@click.option('--size',
              required=False,
              type=str,
              help='Volume size. Override the size defined in the YAML.')
@click.option(
    '--use-existing/--no-use-existing',
    required=False,
    default=None,
    help='Whether to use an existing volume. Override the use_existing '
    'defined in the YAML.')
@click.option('--yes',
              '-y',
              is_flag=True,
              default=False,
              required=False,
              help='Skip confirmation prompt.')
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def volumes_apply(
        entrypoint: Optional[Tuple[str, ...]],
        name: Optional[str],
        infra: Optional[str],
        type: Optional[str],  # pylint: disable=redefined-builtin
        size: Optional[str],
        use_existing: Optional[bool],
        yes: bool,
        async_call: bool):
    """Apply a volume.

    Examples:

    .. code-block:: bash

        # Apply a volume from a YAML file.
        sky volumes apply volume.yaml
        \b
        # Apply a volume from a command.
        sky volumes apply --name pvc1 --infra k8s --type k8s-pvc --size 100Gi
        \b
        # Apply a volume with existing PVC `pvc2` from a command.
        sky volumes apply --name pvc2 --infra k8s --type k8s-pvc --size 100Gi
        --use-existing
    """
    # pylint: disable=import-outside-toplevel
    from sky.volumes import volume as volume_lib

    # The volume spec may come from a YAML file; CLI flags are overlaid on
    # top of it below.
    volume_config_dict: Dict[str, Any] = {}
    if entrypoint is not None and len(entrypoint) > 0:
        entrypoint_str = ' '.join(entrypoint)
        is_yaml, yaml_config, yaml_file_provided, invalid_reason = (
            _check_yaml_only(entrypoint_str))
        if not is_yaml:
            # Distinguish "looks like a YAML path but is invalid" from
            # "not a YAML file at all".
            if yaml_file_provided:
                raise click.BadParameter(f'{entrypoint_str!r} looks like a '
                                         f'yaml path but {invalid_reason}')
            else:
                raise click.BadParameter(
                    f'{entrypoint_str!r} needs to be a YAML file')
        if yaml_config is not None:
            volume_config_dict = yaml_config.copy()
    # CLI flags take precedence over values from the YAML.
    override_config = _build_volume_override_config(name, infra, type, size,
                                                    use_existing)
    volume_config_dict.update(override_config)

    # Create Volume instance
    volume = volume_lib.Volume.from_yaml_config(volume_config_dict)

    logger.debug(f'Volume config: {volume.to_yaml_config()}')

    # Prefer server-side validation; fall back to client-side validation
    # when talking to an older API server.
    # TODO(kevin): remove the try block in v0.13.0
    try:
        volumes_sdk.validate(volume)
    except exceptions.APINotSupportedError:
        # Do best-effort client-side validation.
        volume.validate(skip_cloud_compatibility=True)

    if not yes:
        click.confirm(f'Proceed to create volume {volume.name!r}?',
                      default=True,
                      abort=True,
                      show_default=True)

    # Call SDK to create volume
    try:
        request_id = volumes_sdk.apply(volume)
        _async_call_or_wait(request_id, async_call, 'sky.volumes.apply')
    except RuntimeError as e:
        logger.error(f'{colorama.Fore.RED}Error applying volume: '
                     f'{common_utils.format_exception(e, use_bracket=True)}'
                     f'{colorama.Style.RESET_ALL}')


def _build_volume_override_config(
    name: Optional[str],
    infra: Optional[str],
    volume_type: Optional[str],
    size: Optional[str],
    use_existing: Optional[bool],
) -> Dict[str, Any]:
    """Parse the volume override config."""
    override_config: Dict[str, Any] = {}
    if name is not None:
        override_config['name'] = name
    if infra is not None:
        override_config['infra'] = infra
    if volume_type is not None:
        override_config['type'] = volume_type
    if size is not None:
        override_config['size'] = size
    if use_existing is not None:
        override_config['use_existing'] = use_existing
    return override_config


@volumes.command('ls', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.option('--verbose',
              '-v',
              default=False,
              is_flag=True,
              required=False,
              help='Show all information in full.')
@usage_lib.entrypoint
def volumes_ls(verbose: bool):
    """List volumes managed by SkyPilot."""
    # Fetch the volume list from the API server and render it as a table;
    # --verbose shows all columns.
    all_volumes = sdk.stream_and_get(volumes_sdk.ls())
    click.echo(table_utils.format_volume_table(all_volumes, show_all=verbose))


@volumes.command('delete', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('names',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_volume_name))
@click.option('--all',
              '-a',
              default=False,
              is_flag=True,
              required=False,
              help='Delete all volumes.')
@click.option('--yes',
              '-y',
              default=False,
              is_flag=True,
              required=False,
              help='Skip confirmation prompt.')
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def volumes_delete(names: List[str], all: bool, yes: bool, async_call: bool):  # pylint: disable=redefined-builtin
    """Delete volumes.

    Examples:

    .. code-block:: bash

        # Delete two volumes.
        sky volumes delete pvc1 pvc2
        \b
        # Delete all volumes matching glob pattern 'pvc*'.
        sky volumes delete "pvc*"
        \b
        # Delete all volumes.
        sky volumes delete -a
    """
    # Exactly one of NAMES / --all must be given.
    if sum([bool(names), all]) != 1:
        raise click.UsageError('Either --all or a name must be specified.')
    all_volumes = sdk.get(volumes_sdk.ls())
    if all:
        if not all_volumes:
            click.echo('No volumes to delete.')
            return
        names = [vol['name'] for vol in all_volumes]
    else:
        # Expand glob patterns against the names of existing volumes.
        known_names = [vol['name'] for vol in all_volumes]
        names = _get_glob_matches(known_names, names, resource_type='Volume')
    if not names:
        return
    if not yes:
        noun = 'volume' if len(names) == 1 else 'volumes'
        click.confirm(f'Deleting {len(names)} {noun}: '
                      f'{", ".join(names)}. Proceed?',
                      default=True,
                      abort=True,
                      show_default=True)

    try:
        _async_call_or_wait(volumes_sdk.delete(names), async_call,
                            'sky.volumes.delete')
    except Exception as e:  # pylint: disable=broad-except
        logger.error(f'{colorama.Fore.RED}Error deleting volumes {names}: '
                     f'{str(e)}{colorama.Style.RESET_ALL}')


@cli.group(cls=_NaturalOrderGroup)
def jobs():
    """Managed Jobs CLI (jobs with auto-recovery)."""
    # Container group for `sky jobs ...` subcommands; the group itself
    # performs no work.
    pass


@jobs.command('launch', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=True)
@click.argument('entrypoint',
                required=True,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
# TODO(zhwu): Add --dryrun option to test the launch command.
@_add_click_options(flags.TASK_OPTIONS_WITH_NAME +
                    flags.EXTRA_RESOURCES_OPTIONS + flags.COMMON_OPTIONS)
@click.option('--cluster',
              '-c',
              default=None,
              type=str,
              hidden=True,
              help=('Alias for --name, the name of the managed job.'))
@click.option('--job-recovery',
              default=None,
              type=str,
              help='Recovery strategy to use for managed jobs.')
@click.option(
    '--detach-run',
    '-d',
    default=False,
    is_flag=True,
    help=('If True, as soon as a job is submitted, return from this call '
          'and do not stream execution logs.'))
@click.option('--pool',
              '-p',
              default=None,
              type=str,
              required=False,
              help='(Experimental; optional) Pool to use for jobs submission.')
@click.option('--num-jobs',
              default=None,
              type=int,
              required=False,
              help='Number of jobs to submit.')
@click.option('--git-url', type=str, help='Git repository URL.')
@click.option('--git-ref',
              type=str,
              help='Git reference (branch, tag, or commit hash) to use.')
@flags.yes_option()
@timeline.event
@usage_lib.entrypoint
def jobs_launch(
    entrypoint: Tuple[str, ...],
    name: Optional[str],
    cluster: Optional[str],
    workdir: Optional[str],
    infra: Optional[str],
    cloud: Optional[str],
    region: Optional[str],
    zone: Optional[str],
    gpus: Optional[str],
    cpus: Optional[str],
    memory: Optional[str],
    instance_type: Optional[str],
    num_nodes: Optional[int],
    use_spot: Optional[bool],
    image_id: Optional[str],
    job_recovery: Optional[str],
    env_file: Optional[Dict[str, str]],
    env: List[Tuple[str, str]],
    secret: List[Tuple[str, str]],
    disk_size: Optional[int],
    disk_tier: Optional[str],
    network_tier: Optional[str],
    ports: Tuple[str],
    detach_run: bool,
    yes: bool,
    pool: Optional[str],  # pylint: disable=redefined-outer-name
    num_jobs: Optional[int],
    async_call: bool,
    config_override: Optional[Dict[str, Any]] = None,
    git_url: Optional[str] = None,
    git_ref: Optional[str] = None,
):
    """Launch a managed job from a YAML or a command.

    If ENTRYPOINT points to a valid YAML file, it is read in as the task
    specification. Otherwise, it is interpreted as a bash command.

    Examples:

    .. code-block:: bash

      # You can use normal task YAMLs.
      sky jobs launch task.yaml

      sky jobs launch 'echo hello!'
    """
    # --num-jobs only makes sense when submitting to a pool.
    if pool is None and num_jobs is not None:
        raise click.UsageError('Cannot specify --num-jobs without --pool.')

    # --cluster is a hidden alias for --name; if both are given they must
    # agree.
    if cluster is not None:
        if name is not None and name != cluster:
            raise click.UsageError('Cannot specify both --name and --cluster. '
                                   'Use one of the flags as they are alias.')
        name = cluster
    env = _merge_env_vars(env_file, env)
    cloud, region, zone = _handle_infra_cloud_region_zone_options(
        infra, cloud, region, zone)
    task_or_dag = _make_task_or_dag_from_entrypoint_with_overrides(
        entrypoint,
        name=name,
        workdir=workdir,
        cloud=cloud,
        region=region,
        zone=zone,
        gpus=gpus,
        cpus=cpus,
        memory=memory,
        instance_type=instance_type,
        num_nodes=num_nodes,
        use_spot=use_spot,
        image_id=image_id,
        env=env,
        secret=secret,
        disk_size=disk_size,
        disk_tier=disk_tier,
        network_tier=network_tier,
        ports=ports,
        job_recovery=job_recovery,
        config_override=config_override,
        git_url=git_url,
        git_ref=git_ref,
    )

    # Normalize to a DAG: a single task is wrapped into a one-node DAG so the
    # rest of the launch path only deals with DAGs.
    if not isinstance(task_or_dag, dag_lib.Dag):
        assert isinstance(task_or_dag, task_lib.Task), task_or_dag
        with dag_lib.Dag() as dag:
            dag.add(task_or_dag)
            dag.name = task_or_dag.name
    else:
        dag = task_or_dag

    # An explicit --name/--cluster overrides any name from the YAML.
    if name is not None:
        dag.name = name

    dag_utils.maybe_infer_and_fill_dag_and_task_names(dag)
    dag_utils.fill_default_config_in_dag_for_job_launch(dag)

    common_utils.check_cluster_name_is_valid(name)

    if pool is not None:
        num_job_int = num_jobs if num_jobs is not None else 1
        plural = '' if num_job_int == 1 else 's'
        click.secho(f'Submitting to pool {colorama.Fore.CYAN}{pool!r}'
                    f'{colorama.Style.RESET_ALL} with {colorama.Fore.CYAN}'
                    f'{num_job_int}{colorama.Style.RESET_ALL} job{plural}.')
        # Jobs submitted to a pool run on pre-provisioned workers, so
        # per-task setup / file mounts / storage mounts are ignored; warn if
        # any task specifies them.
        if any(task_.setup is not None or task_.file_mounts or
               task_.storage_mounts for task_ in dag.tasks):
            click.secho(
                f'{colorama.Fore.YELLOW}Setup, file mounts, and storage mounts'
                ' will be ignored when submitting jobs to pool. To update a '
                f'pool, please use `sky jobs pool apply {pool} new-pool.yaml`. '
                f'{colorama.Style.RESET_ALL}')

    # Optimize info is only shown if _need_confirmation.
    if not yes:
        click.secho(
            f'Managed job {dag.name!r} will be launched on (estimated):',
            fg='yellow')

    request_id = managed_jobs.launch(dag,
                                     name,
                                     pool,
                                     num_jobs,
                                     _need_confirmation=not yes)
    job_id_handle = _async_call_or_wait(request_id, async_call,
                                        'sky.jobs.launch')

    if async_call:
        return

    # The handle's first element is either a single job id (int) or a list of
    # job ids (pool submission with --num-jobs).
    job_ids = [job_id_handle[0]] if isinstance(job_id_handle[0],
                                               int) else job_id_handle[0]

    if not detach_run:
        if len(job_ids) == 1:
            # Single job: stream its logs and exit with the tail's returncode.
            job_id = job_ids[0]
            returncode = managed_jobs.tail_logs(name=None,
                                                job_id=job_id,
                                                follow=True,
                                                controller=False)
            sys.exit(returncode)
        else:
            # TODO(tian): This can be very long. Considering have a "group id"
            # and query all job ids with the same group id.
            # Sort job ids to ensure consistent ordering.
            job_ids_str = ','.join(map(str, sorted(job_ids)))
            click.secho(
                f'Jobs submitted with IDs: {colorama.Fore.CYAN}'
                f'{job_ids_str}{colorama.Style.RESET_ALL}.'
                f'\n📋 Useful Commands'
                f'\n{ux_utils.INDENT_SYMBOL}To stream job logs:\t\t\t'
                f'{ux_utils.BOLD}sky jobs logs <job-id>'
                f'{ux_utils.RESET_BOLD}'
                f'\n{ux_utils.INDENT_SYMBOL}To stream controller logs:\t\t'
                f'{ux_utils.BOLD}sky jobs logs --controller <job-id>'
                f'{ux_utils.RESET_BOLD}'
                f'\n{ux_utils.INDENT_LAST_SYMBOL}To cancel all jobs on the '
                f'pool:\t{ux_utils.BOLD}sky jobs cancel --pool {pool}'
                f'{ux_utils.RESET_BOLD}')


@jobs.command('queue', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@flags.verbose_option()
@click.option(
    '--limit',
    '-l',
    default=_NUM_MANAGED_JOBS_TO_SHOW,
    type=int,
    required=False,
    help=(f'Number of jobs to show, default is {_NUM_MANAGED_JOBS_TO_SHOW},'
          f' use "-a/--all" to show all jobs.'))
@click.option(
    '--refresh',
    '-r',
    default=False,
    is_flag=True,
    required=False,
    help='Query the latest statuses, restarting the jobs controller if stopped.'
)
@click.option('--skip-finished',
              '-s',
              default=False,
              is_flag=True,
              required=False,
              help='Show only pending/running jobs\' information.')
@flags.all_users_option('Show jobs from all users.')
@flags.all_option('Show all jobs.')
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def jobs_queue(verbose: bool, refresh: bool, skip_finished: bool,
               all_users: bool, all: bool, limit: int):
    """Show statuses of managed jobs.

    Each managed jobs can have one of the following statuses:

    - ``PENDING``: Job is waiting for a free slot on the jobs controller to be
      accepted.

    - ``STARTING``: Job is starting (provisioning a cluster for the job).

    - ``RUNNING``: Job is running.

    - ``RECOVERING``: The cluster of the job is recovering from a preemption.

    - ``SUCCEEDED``: Job succeeded.

    - ``CANCELLING``: Job was requested to be cancelled by the user, and the
      cancellation is in progress.

    - ``CANCELLED``: Job was cancelled by the user.

    - ``FAILED``: Job failed due to an error from the job itself.

    - ``FAILED_SETUP``: Job failed due to an error from the job's ``setup``
      commands.

    - ``FAILED_PRECHECKS``: Job failed due to an error from our prechecks such
      as invalid cluster names or an infeasible resource is specified.

    - ``FAILED_NO_RESOURCE``: Job failed due to resources being unavailable
      after a maximum number of retries.

    - ``FAILED_CONTROLLER``: Job failed due to an unexpected error in the spot
      controller.

    If the job failed, either due to user code or resource unavailability, the
    error log can be found with ``sky jobs logs --controller``, e.g.:

    .. code-block:: bash

      sky jobs logs --controller job_id

    This also shows the logs for provisioning and any preemption and recovery
    attempts.

    (Tip) To fetch job statuses every 60 seconds, use ``watch``:

    .. code-block:: bash

      watch -n60 sky jobs queue

    (Tip) To show only the latest 10 jobs, use ``-l/--limit 10``:

    .. code-block:: bash

      sky jobs queue -l 10

    """
    click.secho('Fetching managed job statuses...', fg='cyan')
    with rich_utils.client_status('[cyan]Checking managed jobs[/]'):
        # --all disables the row limit entirely.
        max_num_jobs_to_show = (limit if not all else None)
        # Select which columns to fetch based on verbosity / user scope.
        fields = _DEFAULT_MANAGED_JOB_FIELDS_TO_GET
        if verbose:
            fields = _VERBOSE_MANAGED_JOB_FIELDS_TO_GET
        if all_users:
            fields = fields + _USER_NAME_FIELD
            if verbose:
                fields = fields + _USER_HASH_FIELD
        # Call both cli_utils.get_managed_job_queue and managed_jobs.pool_status
        # in parallel
        def get_managed_jobs_queue():
            return cli_utils.get_managed_job_queue(refresh=refresh,
                                                   skip_finished=skip_finished,
                                                   all_users=all_users,
                                                   limit=max_num_jobs_to_show,
                                                   fields=fields)

        def get_pool_status():
            try:
                return managed_jobs.pool_status(pool_names=None)
            except Exception:  # pylint: disable=broad-except
                # If pool_status fails, we'll just skip the worker information
                return None

        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            managed_jobs_future = executor.submit(get_managed_jobs_queue)
            pool_status_future = executor.submit(get_pool_status)

            (managed_jobs_request_id,
             queue_result_version) = managed_jobs_future.result()
            pool_status_request_id = pool_status_future.result()

        num_jobs, msg = _handle_jobs_queue_request(
            managed_jobs_request_id,
            pool_status_request_id=pool_status_request_id,
            show_all=verbose,
            show_user=all_users,
            max_num_jobs_to_show=max_num_jobs_to_show,
            is_called_by_user=True,
            queue_result_version=queue_result_version,
        )
    if not skip_finished:
        in_progress_only_hint = ''
    else:
        in_progress_only_hint = ' (showing in-progress jobs only)'
    click.echo(f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
               f'Managed jobs{colorama.Style.RESET_ALL}'
               f'{in_progress_only_hint}\n{msg}')
    # Hint when the queue was truncated by --limit.
    if max_num_jobs_to_show and num_jobs and max_num_jobs_to_show < num_jobs:
        # Fix: a space is needed between 'jobs' and '(use --limit...' so the
        # hint does not render as 'managed jobs(use --limit'.
        click.echo(
            f'{colorama.Fore.CYAN}'
            f'Only showing the latest {max_num_jobs_to_show} '
            f'managed jobs '
            f'(use --limit to show more managed jobs or '
            f'--all to show all managed jobs) {colorama.Style.RESET_ALL} ')


@jobs.command('cancel', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.option('--name',
              '-n',
              required=False,
              type=str,
              help='Managed job name to cancel.')
@click.option('--pool',
              '-p',
              required=False,
              type=str,
              help='Pool name to cancel.')
@click.argument('job_ids', default=None, type=int, required=False, nargs=-1)
@flags.all_option('Cancel all managed jobs for the current user.')
@flags.yes_option()
@flags.all_users_option('Cancel all managed jobs from all users.')
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def jobs_cancel(
    name: Optional[str],
    pool: Optional[str],  # pylint: disable=redefined-outer-name
    job_ids: Tuple[int],
    all: bool,
    yes: bool,
    all_users: bool,
):
    """Cancel managed jobs.

    You can provide either a job name or a list of job IDs to be cancelled.
    They are exclusive options.

    Examples:

    .. code-block:: bash

      # Cancel managed job with name 'my-job'
      $ sky jobs cancel -n my-job
      \b
      # Cancel managed jobs with IDs 1, 2, 3
      $ sky jobs cancel 1 2 3
      \b
      # Cancel all managed jobs in pool 'my-pool'
      $ sky jobs cancel -p my-pool
    """
    job_id_str = ','.join(map(str, job_ids))
    # Exactly one selector must be provided: job IDs, a name, a pool, or the
    # all/all-users flags.
    selectors = [
        bool(job_ids), name is not None, pool is not None, all or all_users
    ]
    if selectors.count(True) != 1:
        arguments = []
        if job_ids:
            arguments.append(f'--job-ids {job_id_str}')
        if name is not None:
            arguments.append(f'--name {name}')
        if pool is not None:
            arguments.append(f'--pool {pool}')
        if all:
            arguments.append('--all')
        if all_users:
            arguments.append('--all-users')
        raise click.UsageError(
            'Can only specify one of JOB_IDS, --name, --pool, or '
            f'--all/--all-users. Provided {" ".join(arguments)!r}.')

    if not yes:
        # Describe what is about to be cancelled for the confirmation prompt.
        if all_users:
            job_identity_str = 'all managed jobs FOR ALL USERS'
        elif all:
            job_identity_str = 'all managed jobs'
        elif job_ids:
            plural = 's' if len(job_ids) > 1 else ''
            job_identity_str = (
                f'managed job{plural} with ID{plural} {job_id_str}')
        elif name is not None:
            job_identity_str = f'{name!r}'
        else:
            job_identity_str = f'managed jobs in pool {pool!r}'
        click.confirm(f'Cancelling {job_identity_str}. Proceed?',
                      default=True,
                      abort=True,
                      show_default=True)

    cancel_request = managed_jobs.cancel(job_ids=job_ids,
                                         name=name,
                                         pool=pool,
                                         all=all,
                                         all_users=all_users)
    sdk.stream_and_get(cancel_request)


@jobs.command('logs', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.option('--name',
              '-n',
              required=False,
              type=str,
              help='Managed job name.')
@click.option(
    '--follow/--no-follow',
    is_flag=True,
    default=True,
    help=('Follow the logs of the job. [default: --follow] '
          'If --no-follow is specified, print the log so far and exit.'))
@click.option(
    '--controller',
    is_flag=True,
    default=False,
    help=('Show the controller logs of this job; useful for debugging '
          'launching/recoveries, etc.'))
@click.option(
    '--refresh',
    '-r',
    default=False,
    is_flag=True,
    required=False,
    help='Query the latest job logs, restarting the jobs controller if stopped.'
)
@click.option('--sync-down',
              '-s',
              default=False,
              is_flag=True,
              required=False,
              help='Download logs for all jobs shown in the queue.')
@click.argument('job_id', required=False, type=int)
@usage_lib.entrypoint
def jobs_logs(name: Optional[str], job_id: Optional[int], follow: bool,
              controller: bool, refresh: bool, sync_down: bool):
    """Tail or sync down the log of a managed job."""
    try:
        if not sync_down:
            # Stream (or print) the logs, then exit with the tail returncode.
            returncode = managed_jobs.tail_logs(name=name,
                                                job_id=job_id,
                                                follow=follow,
                                                controller=controller,
                                                refresh=refresh)
            sys.exit(returncode)
        # --sync-down: download log files locally and report their paths.
        with rich_utils.client_status(
                ux_utils.spinner_message('Downloading jobs logs')):
            downloaded_paths = managed_jobs.download_logs(name=name,
                                                          job_id=job_id,
                                                          controller=controller,
                                                          refresh=refresh)
        controller_str = ' (controller)' if controller else ''
        for job, local_path in downloaded_paths.items():
            logger.info(
                f'{colorama.Fore.CYAN}Job {job} logs{controller_str}: '
                f'{local_path}{colorama.Style.RESET_ALL}')
    except exceptions.ClusterNotUpError:
        with ux_utils.print_exception_no_traceback():
            raise


@jobs.command('dashboard', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@usage_lib.entrypoint
def jobs_dashboard():
    """Opens a dashboard for managed jobs."""
    # Delegates to the SDK, opening the dashboard at its jobs page.
    sdk.dashboard(starting_page='jobs')


@jobs.group(cls=_NaturalOrderGroup)
def pool():
    """(Experimental) Pool management commands."""
    # Container subgroup under `sky jobs`; subcommands are registered via
    # @pool.command below. The docstring doubles as the CLI help text.
    pass


# TODO(MaoZiming): Update Doc.
# TODO(MaoZiming): Expose mix replica traffic option to user.
# Currently, we do not mix traffic from old and new replicas.
@pool.command('apply', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('pool_yaml',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
@click.option('--pool',
              '-p',
              default=None,
              type=str,
              help='A pool name. Unique for each pool. If not provided, '
              'a unique name is autogenerated.')
@click.option('--mode',
              default=serve_lib.DEFAULT_UPDATE_MODE.value,
              type=click.Choice([m.value for m in serve_lib.UpdateMode],
                                case_sensitive=False),
              required=False,
              help=('Update mode. If "rolling", pool will be updated '
                    'with rolling update. If "blue_green", pool will '
                    'be updated with blue-green update. This option is only '
                    'valid when the pool is already running.'))
@click.option('--workers',
              default=None,
              type=int,
              required=False,
              help='Can be used to update the number of workers in the pool.')
@_add_click_options(flags.TASK_OPTIONS + flags.EXTRA_RESOURCES_OPTIONS +
                    flags.COMMON_OPTIONS)
@flags.yes_option()
@timeline.event
@usage_lib.entrypoint
def jobs_pool_apply(
    pool_yaml: Optional[Tuple[str, ...]],
    pool: Optional[str],  # pylint: disable=redefined-outer-name
    workdir: Optional[str],
    infra: Optional[str],
    cloud: Optional[str],
    region: Optional[str],
    zone: Optional[str],
    num_nodes: Optional[int],
    use_spot: Optional[bool],
    image_id: Optional[str],
    env_file: Optional[Dict[str, str]],
    env: List[Tuple[str, str]],
    secret: List[Tuple[str, str]],
    gpus: Optional[str],
    instance_type: Optional[str],
    ports: Tuple[str],
    cpus: Optional[str],
    memory: Optional[str],
    disk_size: Optional[int],
    disk_tier: Optional[str],
    network_tier: Optional[str],
    mode: str,
    workers: Optional[int],
    yes: bool,
    async_call: bool,
):
    """Either apply a config to a pool for managed jobs submission
    or update the number of workers in the pool. One of POOL_YAML or --workers
    must be provided.
    Config:
        If the pool is already running, the config will be applied to the pool.
        Otherwise, a new pool will be created.
    Workers:
        The --workers option can be used to override the number of workers
        specified in the YAML file, or to update workers without a YAML file.
        Example:
            sky jobs pool apply -p my-pool --workers 5
    """
    # Resolve --infra into its (cloud, region, zone) components.
    cloud, region, zone = _handle_infra_cloud_region_zone_options(
        infra, cloud, region, zone)
    # --workers and POOL_YAML are mutually exclusive entry modes.
    if workers is not None and pool_yaml is not None and len(pool_yaml) > 0:
        raise click.UsageError(
            'Cannot specify both --workers and POOL_YAML. Please use one of '
            'them.')

    if pool_yaml is None or len(pool_yaml) == 0:
        # Workers-only update: no task spec is submitted (task stays None),
        # so a pool name is mandatory to identify the target pool.
        if pool is None:
            raise click.UsageError(
                'A pool name must be provided to update the number of workers.')
        task = None
        click.secho(f'Attempting to update {pool} to have {workers} workers',
                    fg='cyan')
    else:
        # YAML-based apply: autogenerate a pool name if none was given.
        if pool is None:
            pool = serve_lib.generate_service_name(pool=True)

        task = _generate_task_with_service(
            service_name=pool,
            service_yaml_args=pool_yaml,
            workdir=workdir,
            cloud=cloud,
            region=region,
            zone=zone,
            gpus=gpus,
            cpus=cpus,
            memory=memory,
            instance_type=instance_type,
            num_nodes=num_nodes,
            use_spot=use_spot,
            image_id=image_id,
            env_file=env_file,
            env=env,
            secret=secret,
            disk_size=disk_size,
            disk_tier=disk_tier,
            network_tier=network_tier,
            ports=ports,
            not_supported_cmd='sky jobs pool up',
            pool=True,
        )
        assert task.service is not None
        # The YAML must declare a `pool` section to be usable as a pool spec.
        if not task.service.pool:
            raise click.UsageError('The YAML file needs a `pool` section.')
        click.secho('Pool spec:', fg='cyan')
        click.echo(task.service)
        serve_lib.validate_service_task(task, pool=True)

        click.secho(
            'Each pool worker will use the following resources (estimated):',
            fg='cyan')
        # NOTE(review): `dag` is not referenced afterward; entering the Dag
        # context presumably registers the task so the resource estimate
        # above can be produced downstream — confirm this side effect.
        with dag_lib.Dag() as dag:
            dag.add(task)

    # task is None for workers-only updates; pool_apply handles both modes.
    request_id = managed_jobs.pool_apply(task,
                                         pool,
                                         workers=workers,
                                         mode=serve_lib.UpdateMode(mode),
                                         _need_confirmation=not yes)
    _async_call_or_wait(request_id, async_call, 'sky.jobs.pool_apply')


@pool.command('status', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@flags.verbose_option()
@click.argument('pool_names', required=False, type=str, nargs=-1)
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def jobs_pool_status(verbose: bool, pool_names: List[str]):
    """Show statuses of pools.

    Show detailed statuses of one or more pools. If POOL_NAME is not
    provided, show all pools' status.
    """
    # An empty argument list means "query every pool".
    pool_names_to_query: Optional[List[str]] = (pool_names
                                                if pool_names else None)
    with rich_utils.client_status('[cyan]Checking pools[/]'):
        status_request_id = managed_jobs.pool_status(pool_names_to_query)
        _, msg = _handle_services_request(status_request_id,
                                          service_names=pool_names_to_query,
                                          show_all=verbose,
                                          show_endpoint=False,
                                          pool=True,
                                          is_called_by_user=True)

    # Header, then the formatted status table.
    click.echo(f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
               f'Pools{colorama.Style.RESET_ALL}')
    click.echo(msg)


@pool.command('down', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('pool_names', required=False, type=str, nargs=-1)
@flags.all_option('Delete all pools.')
@click.option('--purge',
              '-p',
              default=False,
              is_flag=True,
              help='Tear down pools in failed status.')
@flags.yes_option()
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def jobs_pool_down(
    pool_names: List[str],
    all: bool,
    purge: bool,
    yes: bool,
    async_call: bool,
) -> None:
    """Delete pool(s).

    POOL_NAMES is the name of the pool (or glob pattern) to delete. If
    both POOL_NAMES and ``--all`` are supplied, the latter takes precedence.

    Deleting a pool will delete all of its workers and associated resources.
    """
    # Require exactly one of POOL_NAMES / --all.
    if sum([bool(pool_names), all]) != 1:
        argument_str = (f'POOL_NAMES={",".join(pool_names)}'
                        if pool_names else '')
        argument_str += ' --all' if all else ''
        raise click.UsageError('Can only specify one of POOL_NAMES or --all. '
                               f'Provided {argument_str!r}.')

    def _get_nonterminal_jobs(pool_names: List[str],
                              all: bool) -> List[responses.ManagedJobRecord]:
        """Return non-terminal managed jobs attached to the target pool(s)."""
        # Get nonterminal jobs for this pool using managed_jobs.queue
        request_id, queue_result_version = cli_utils.get_managed_job_queue(
            refresh=False,
            skip_finished=True,
            all_users=True,
            limit=None,
            fields=['job_id', 'status', 'pool'],
        )
        jobs_result = sdk.stream_and_get(request_id)

        # Handle both tuple and list responses
        jobs_list: List[responses.ManagedJobRecord]
        if queue_result_version.v2():
            # v2 responses wrap the job list in a tuple; element 0 is the list.
            jobs_list = jobs_result[0]
        else:
            jobs_list = typing.cast(List[responses.ManagedJobRecord],
                                    jobs_result)

        def _should_include_job(job: responses.ManagedJobRecord) -> bool:
            # Job must not be terminal.
            if job.get('status', ManagedJobStatus.SUCCEEDED).is_terminal():
                return False
            # If len is 0 then we are using -a option, so we include all jobs
            # if they're associated with a pool.
            if all:
                return job.get('pool') is not None
            # Otherwise we are using specific pool names, so we include the job
            # if it's associated with one of the specified pools.
            return job.get('pool') in pool_names

        # Filter jobs by pool name and ensure nonterminal
        pool_jobs = [job for job in jobs_list if _should_include_job(job)]
        return pool_jobs

    # Human-readable description of the deletion target for prompts/messages.
    quoted_pool_names = [f'{name!r}' for name in pool_names]
    list_pool_str = ', '.join(quoted_pool_names)
    pool_identity_str = f'pool(s) {list_pool_str}'
    if all:
        pool_identity_str = 'all pools'

    already_confirmed = False
    try:
        pool_jobs = _get_nonterminal_jobs(pool_names, all)
        if pool_jobs:
            num_jobs = len(pool_jobs)
            job_ids = [job['job_id'] for job in pool_jobs]
            job_ids_str = ','.join(str(job_id) for job_id in job_ids)
            click.echo(
                f'{colorama.Fore.YELLOW}Pool(s) has {num_jobs} '
                f'nonterminal jobs: {job_ids_str} so it is not yet safe to down'
                f'.{colorama.Style.RESET_ALL}')
            if not yes:
                should_cancel = click.confirm(
                    'Would you like to cancel all jobs and down the pool(s)?',
                    default=False,
                    abort=False,
                    show_default=True)
                if not should_cancel:
                    raise click.Abort()
                # Skip the second confirmation prompt below — the user has
                # already agreed to cancel jobs and tear down the pool(s).
                already_confirmed = True

            # Cancel all jobs in the pool
            with rich_utils.client_status(
                    ux_utils.spinner_message(
                        f'Cancelling {num_jobs} jobs in {pool_identity_str}...')
            ):
                try:
                    sdk.get(managed_jobs.cancel(job_ids=job_ids))
                except Exception as e:
                    logger.warning(f'Failed to cancel jobs: {e}.')
                    raise e

                # Poll until all jobs reach a terminal state or we time out.
                max_wait_time = 300  # 5 minutes max wait
                check_interval = 2  # Check every 2 seconds
                start_time = time.time()
                remaining_pool_jobs = _get_nonterminal_jobs(pool_names, all)
                while (remaining_pool_jobs and
                       time.time() - start_time < max_wait_time):
                    # Check remaining jobs via API
                    time.sleep(check_interval)
                    remaining_pool_jobs = _get_nonterminal_jobs(pool_names, all)
                    # NOTE(review): the return value of spinner_message is
                    # discarded here, so this call likely does not refresh the
                    # spinner text — confirm against ux_utils' API.
                    ux_utils.spinner_message(
                        f'Waiting for {len(remaining_pool_jobs)} '
                        'jobs to be cancelled...')

                # Overwrite any leftover spinner text on the current line.
                click.echo('\r' + ' ' * 80 + '\r', nl=False)
                if time.time() - start_time >= max_wait_time:
                    click.echo(
                        f'{colorama.Fore.YELLOW}Warning: Timeout waiting '
                        f'for jobs to finish. Proceeding with pool down '
                        f'anyway.{colorama.Style.RESET_ALL}')
                else:
                    click.echo('All jobs cancelled.')
    except Exception as e:  # pylint: disable=broad-except
        # If API call fails, log warning but continue with pool down
        logger.warning(
            f'Failed to check for running jobs in pool(s): {pool_names!r}: {e}.'
            ' Proceeding with pool down.')

    if not yes and not already_confirmed:
        click.confirm(f'Terminating {pool_identity_str}. Proceed?',
                      default=True,
                      abort=True,
                      show_default=True)

    request_id = managed_jobs.pool_down(pool_names, all=all, purge=purge)
    _async_call_or_wait(request_id, async_call, 'sky.jobs.pool_down')


def _handle_serve_logs(
        service_name: str,
        follow: bool,
        controller: bool,
        load_balancer: bool,
        replica_ids: Tuple[int, ...],
        sync_down: bool,
        tail: Optional[int],
        pool: bool,  # pylint: disable=redefined-outer-name
):
    """Shared implementation of `sky serve logs` and `sky jobs pool logs`.

    Either tails logs from exactly one target (controller, load balancer,
    or a single replica/worker), or — with --sync-down — downloads logs
    from any number of targets to the local machine.
    """
    entity = 'pool' if pool else 'service'
    entity_cap = entity.capitalize()
    unit = 'worker' if pool else 'replica'

    if tail is not None:
        if tail < 0:
            raise click.UsageError('--tail must be a non-negative integer.')
        # TODO(arda): We could add ability to tail and follow logs together.
        if follow:
            follow = False
            logger.warning(
                f'{colorama.Fore.YELLOW}'
                '--tail and --follow cannot be used together. '
                f'Changed the mode to --no-follow.{colorama.Style.RESET_ALL}')

    # Collect the components the user explicitly asked for.
    components: Set[serve_lib.ServiceComponent] = set()
    if controller:
        components.add(serve_lib.ServiceComponent.CONTROLLER)
    if load_balancer:
        assert not pool, 'Load balancer is not supported for pools.'
        components.add(serve_lib.ServiceComponent.LOAD_BALANCER)
    # Passing any replica/worker IDs implies the REPLICA component.
    if replica_ids:
        components.add(serve_lib.ServiceComponent.REPLICA)

    if sync_down:
        # Sync-down accepts multiple targets. With no explicit selection,
        # default to downloading everything this entity has.
        sync_targets = list(components)
        if not sync_targets and not replica_ids:
            sync_targets = [
                serve_lib.ServiceComponent.CONTROLLER,
                serve_lib.ServiceComponent.REPLICA,
            ]
            if not pool:
                sync_targets.append(serve_lib.ServiceComponent.LOAD_BALANCER)

        run_timestamp = sky_logging.get_run_timestamp()
        local_dir = (pathlib.Path(constants.SKY_LOGS_DIRECTORY) / entity /
                     f'{service_name}_{run_timestamp}').expanduser()
        local_dir.mkdir(parents=True, exist_ok=True)

        with rich_utils.client_status(
                ux_utils.spinner_message(f'Downloading {entity} logs...')):
            if pool:
                managed_jobs.pool_sync_down_logs(service_name,
                                                 str(local_dir),
                                                 targets=sync_targets,
                                                 worker_ids=list(replica_ids),
                                                 tail=tail)
            else:
                serve_lib.sync_down_logs(service_name,
                                         str(local_dir),
                                         targets=sync_targets,
                                         replica_ids=list(replica_ids),
                                         tail=tail)
        logger.info(f'{colorama.Fore.CYAN}{entity_cap} {service_name} logs: '
                    f'{local_dir}{colorama.Style.RESET_ALL}')
        return

    # Tailing is restricted to exactly one target.
    if serve_lib.ServiceComponent.REPLICA in components:
        # Exactly one replica/worker ID is required for tailing.
        if len(replica_ids) != 1:
            raise click.UsageError(
                f'Can only tail logs from a single {unit} at a time. '
                f'Provide exactly one {unit.upper()}_ID or use --sync-down '
                f'to download logs from multiple {unit}s.')
        # A replica may not be combined with the controller/load balancer.
        if len(components) > 1:
            raise click.UsageError(
                'Can only tail logs from one target at a time (controller, '
                f'load balancer, or a single {unit}). Use --sync-down '
                'to download logs from multiple sources.')
    elif not components:
        raise click.UsageError(
            'Specify a target to tail: --controller, --load-balancer, or '
            f'a {unit.upper()}_ID.')
    elif len(components) > 1:
        raise click.UsageError(
            'Can only tail logs from one target at a time. Use --sync-down '
            'to download logs from multiple sources.')

    # Exactly one target remains at this point.
    assert len(components) == 1
    assert len(replica_ids) in [0, 1]
    target = components.pop()
    target_id: Optional[int] = replica_ids[0] if replica_ids else None

    try:
        if pool:
            managed_jobs.pool_tail_logs(service_name,
                                        target=target,
                                        worker_id=target_id,
                                        follow=follow,
                                        tail=tail)
        else:
            serve_lib.tail_logs(service_name,
                                target=target,
                                replica_id=target_id,
                                follow=follow,
                                tail=tail)
    except exceptions.ClusterNotUpError:
        with ux_utils.print_exception_no_traceback():
            raise


@pool.command('logs', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.option(
    '--follow/--no-follow',
    is_flag=True,
    default=True,
    help=('Follow the logs of the job. [default: --follow] '
          'If --no-follow is specified, print the log so far and exit.'))
@click.option('--controller',
              is_flag=True,
              default=False,
              required=False,
              help='Show the controller logs of this pool.')
@click.option('--sync-down',
              '-s',
              is_flag=True,
              default=False,
              help='Sync down logs to the local machine. Can be combined with '
              '--controller or worker ID to narrow scope.')
@click.option(
    '--tail',
    default=None,
    type=int,
    help='The number of lines to display from the end of the log file. '
    'Default is None, which means print all lines.')
@click.argument('pool_name', required=True, type=str)
@click.argument('worker_ids', required=False, type=int, nargs=-1)
@usage_lib.entrypoint
# TODO(tian): Add default argument for this CLI if none of the flags are
# specified.
def jobs_pool_logs(
    pool_name: str,
    follow: bool,
    controller: bool,
    worker_ids: Tuple[int, ...],
    sync_down: bool,
    tail: Optional[int],
):
    """Tail or sync down logs of a pool.

    Logs can be tailed from one target (controller, or a single worker) or
    synced down from multiple targets simultaneously.

    Example:

    .. code-block:: bash

        # Tail the controller logs of a pool
        sky jobs pool logs --controller [POOL_NAME]
        \b
        # Print the worker logs so far and exit
        sky jobs pool logs --no-follow [POOL_NAME] 1
        \b
        # Tail the logs of worker 1
        sky jobs pool logs [POOL_NAME] 1
        \b
        # Show the last 100 lines of the controller logs
        sky jobs pool logs --controller --tail 100 [POOL_NAME]
        \b
        # Sync down all logs of the pool (controller, all workers)
        sky jobs pool logs [POOL_NAME] --sync-down
        \b
        # Sync down controller logs and logs for workers 1 and 3
        sky jobs pool logs [POOL_NAME] 1 3 --controller --sync-down
    """
    # Pools reuse the serve logs implementation: worker IDs map onto replica
    # IDs and pools have no load balancer, hence load_balancer=False.
    _handle_serve_logs(pool_name,
                       follow=follow,
                       controller=controller,
                       load_balancer=False,
                       replica_ids=worker_ids,
                       sync_down=sync_down,
                       tail=tail,
                       pool=True)


@cli.command(cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@usage_lib.entrypoint
def dashboard() -> None:
    """Opens the SkyPilot dashboard."""
    # Thin wrapper: all behavior lives in the SDK call.
    sdk.dashboard()


@cli.command(cls=_DocumentedCodeCommand, hidden=True)
@flags.config_option(expose_value=False)
@usage_lib.entrypoint
def ui() -> None:
    """Opens the SkyPilot dashboard."""
    # Hidden duplicate of `sky dashboard` — presumably kept as a
    # backward-compatible alias; confirm before removing.
    sdk.dashboard()


@cli.group(cls=_NaturalOrderGroup)
def serve():
    """SkyServe CLI (multi-region, multi-cloud serving)."""
    # Command group only; subcommands are registered via @serve.command.
    pass


def _generate_task_with_service(
    service_name: str,
    service_yaml_args: Tuple[str, ...],
    workdir: Optional[str],
    cloud: Optional[str],
    region: Optional[str],
    zone: Optional[str],
    num_nodes: Optional[int],
    use_spot: Optional[bool],
    image_id: Optional[str],
    env_file: Optional[Dict[str, str]],
    env: List[Tuple[str, str]],
    secret: Optional[List[Tuple[str, str]]],
    gpus: Optional[str],
    instance_type: Optional[str],
    ports: Optional[Tuple[str]],
    cpus: Optional[str],
    memory: Optional[str],
    disk_size: Optional[int],
    disk_tier: Optional[str],
    network_tier: Optional[str],
    not_supported_cmd: str,
    pool: bool,  # pylint: disable=redefined-outer-name
    git_url: Optional[str] = None,
    git_ref: Optional[str] = None,
) -> task_lib.Task:
    """Generate a task with service section from a service YAML file.

    Builds the task from the YAML entrypoint with the CLI resource/env
    overrides applied and the task `name` replaced by `service_name`, then
    validates the `service`/`pool` section. For services (not pools) a
    single ingress port is resolved: taken from `service.ports` when set,
    otherwise inferred from the single port opened in every replica's
    resources.

    Args:
        not_supported_cmd: Command name used in the error message when the
            entrypoint expands to a DAG (DAGs are not supported here).
        pool: Whether the YAML describes a pool rather than a service.

    Returns:
        The constructed task with a validated service section.

    Raises:
        click.UsageError: If the entrypoint is not a YAML file or expands
            to a multi-task DAG.
        ValueError: If the service/pool section is missing, ports are set
            for a pool, or the service and resource ports disagree.
    """
    # NOTE(review): args are joined without a separator; the joined string
    # is only used to check the entrypoint looks like a YAML file — the raw
    # tuple is forwarded below.
    is_yaml, _ = _check_yaml(''.join(service_yaml_args))
    yaml_name = 'SERVICE_YAML' if not pool else 'POOL_YAML'
    if not is_yaml:
        raise click.UsageError(f'{yaml_name} must be a valid YAML file.')
    env = _merge_env_vars(env_file, env)
    # We keep nargs=-1 in service_yaml argument to reuse this function.
    task = _make_task_or_dag_from_entrypoint_with_overrides(
        service_yaml_args,
        # For Service YAML, we override the `name` field with service name.
        name=service_name,
        workdir=workdir,
        cloud=cloud,
        region=region,
        zone=zone,
        gpus=gpus,
        cpus=cpus,
        memory=memory,
        instance_type=instance_type,
        num_nodes=num_nodes,
        use_spot=use_spot,
        image_id=image_id,
        env=env,
        secret=secret,
        disk_size=disk_size,
        disk_tier=disk_tier,
        network_tier=network_tier,
        ports=ports,
        git_url=git_url,
        git_ref=git_ref,
    )
    if isinstance(task, dag_lib.Dag):
        raise click.UsageError(
            _DAG_NOT_SUPPORTED_MESSAGE.format(command=not_supported_cmd))

    if task.service is None:
        field_name = 'service' if not pool else 'pool'
        with ux_utils.print_exception_no_traceback():
            raise ValueError(f'{field_name.capitalize()} section not found '
                             'in the YAML file. To fix, add a valid '
                             f'`{field_name}` field.')

    # Pools have no ingress, so the port-resolution logic below is skipped;
    # any port specification is rejected outright.
    if task.service.pool:
        if task.service.ports is not None or ports:
            with ux_utils.print_exception_no_traceback():
                raise ValueError('Cannot specify ports in a pool.')
        return task

    # NOTE(yi): we only allow one service port now.
    service_port: Optional[int] = int(
        task.service.ports) if task.service.ports is not None else None
    if service_port is None:
        # No explicit service port: infer it from replica resources. Every
        # resource must open exactly one port, and all must agree.
        for requested_resources in list(task.resources):
            if requested_resources.ports is None:
                with ux_utils.print_exception_no_traceback():
                    raise ValueError(
                        'Must specify at least one ports in resources. Each '
                        'replica will use the port specified as application '
                        'ingress port if only one port is specified in the '
                        'replica resources. If there are multiple ports opened '
                        'in the replica, please set the `service.ports` field '
                        'in the service config.')
            requested_ports = list(
                resources_utils.port_ranges_to_set(requested_resources.ports))
            if len(requested_ports) > 1:
                with ux_utils.print_exception_no_traceback():
                    raise ValueError(
                        'Multiple ports specified in resources. Please '
                        'specify the main port in the `service.ports` field.')
            # We request all the replicas using the same port for now, but it
            # should be fine to allow different replicas to use different ports
            # in the future.
            resource_port = requested_ports[0]
            if service_port is None:
                service_port = resource_port
            if service_port != resource_port:
                with ux_utils.print_exception_no_traceback():
                    raise ValueError(
                        f'Got multiple ports: {service_port} and '
                        f'{resource_port} in different resources. '
                        'Please specify the same port in all replicas, or '
                        'explicitly set the service port in the '
                        '`service.ports` section.')
        assert service_port is not None
        task.service.set_ports(str(service_port))
    else:
        # Explicit service port: it must be among the ports each replica
        # opens in its resources.
        for requested_resources in list(task.resources):
            if requested_resources.ports is None:
                with ux_utils.print_exception_no_traceback():
                    raise ValueError(
                        'Must specify at least one ports in every replica '
                        'resources.')
            ports_set = resources_utils.port_ranges_to_set(
                requested_resources.ports)
            if service_port not in ports_set:
                with ux_utils.print_exception_no_traceback():
                    # TODO(tian): Automatically infer resource port from
                    # service port if none of them is specified in the
                    # replica resources.
                    raise ValueError(
                        f'The service port {service_port} specified in the '
                        'service section is not found in some resources. '
                        'Please check if the service port is correct or add '
                        'the service port to replica resources.')

    return task


@serve.command('up', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('service_yaml',
                required=True,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
@click.option('--service-name',
              '-n',
              default=None,
              type=str,
              help='A service name. Unique for each service. If not provided, '
              'a unique name is autogenerated.')
@click.option('--git-url', type=str, help='Git repository URL.')
@click.option('--git-ref',
              type=str,
              help='Git reference (branch, tag, or commit hash) to use.')
@_add_click_options(flags.TASK_OPTIONS + flags.EXTRA_RESOURCES_OPTIONS +
                    flags.COMMON_OPTIONS)
@flags.yes_option()
@timeline.event
@usage_lib.entrypoint
def serve_up(
    service_yaml: Tuple[str, ...],
    service_name: Optional[str],
    workdir: Optional[str],
    infra: Optional[str],
    cloud: Optional[str],
    region: Optional[str],
    zone: Optional[str],
    num_nodes: Optional[int],
    use_spot: Optional[bool],
    image_id: Optional[str],
    env_file: Optional[Dict[str, str]],
    env: List[Tuple[str, str]],
    secret: List[Tuple[str, str]],
    gpus: Optional[str],
    instance_type: Optional[str],
    ports: Tuple[str],
    cpus: Optional[str],
    memory: Optional[str],
    disk_size: Optional[int],
    disk_tier: Optional[str],
    network_tier: Optional[str],
    yes: bool,
    async_call: bool,
    git_url: Optional[str] = None,
    git_ref: Optional[str] = None,
):
    """Launch a SkyServe service.

    SERVICE_YAML must point to a valid YAML file.

    A regular task YAML can be turned into a service YAML by adding a `service`
    field. E.g.,

    .. code-block:: yaml

        # service.yaml
        service:
          ports: 8080
          readiness_probe:
            path: /health
            initial_delay_seconds: 20
          replicas: 1

        resources:
          cpus: 2+

        run: python -m http.server 8080

    Example:

    .. code-block:: bash

        sky serve up service.yaml
    """
    # Reconcile the --infra flag with the individual --cloud/--region/--zone
    # flags into concrete cloud, region and zone values.
    cloud, region, zone = _handle_infra_cloud_region_zone_options(
        infra, cloud, region, zone)
    if service_name is None:
        service_name = serve_lib.generate_service_name()

    # Build the task with CLI overrides applied; raises on an invalid or
    # missing `service` section.
    task = _generate_task_with_service(
        service_name=service_name,
        service_yaml_args=service_yaml,
        workdir=workdir,
        cloud=cloud,
        region=region,
        zone=zone,
        gpus=gpus,
        cpus=cpus,
        memory=memory,
        instance_type=instance_type,
        num_nodes=num_nodes,
        use_spot=use_spot,
        image_id=image_id,
        env_file=env_file,
        env=env,
        secret=secret,
        disk_size=disk_size,
        disk_tier=disk_tier,
        network_tier=network_tier,
        ports=ports,
        not_supported_cmd='sky serve up',
        pool=False,
        git_url=git_url,
        git_ref=git_ref,
    )
    assert task.service is not None
    # A pool YAML must go through `sky jobs pool apply`, not `sky serve up`.
    if task.service.pool:
        raise click.UsageError('The YAML file needs a `service` section.')
    click.secho('Service spec:', fg='cyan')
    click.echo(task.service)
    serve_lib.validate_service_task(task, pool=False)

    click.secho('Each replica will use the following resources (estimated):',
                fg='cyan')
    # NOTE(review): `dag` is not referenced after construction — presumably
    # entering the Dag context has the side effect of producing the resource
    # estimate printed above; confirm before simplifying.
    with dag_lib.Dag() as dag:
        dag.add(task)

    request_id = serve_lib.up(task, service_name, _need_confirmation=not yes)
    _async_call_or_wait(request_id, async_call, 'sky.serve.up')


# TODO(MaoZiming): Update Doc.
# TODO(MaoZiming): Expose mix replica traffic option to user.
# Currently, we do not mix traffic from old and new replicas.
@serve.command('update', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('service_name', required=True, type=str)
@click.argument('service_yaml',
                required=True,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_file_name))
@_add_click_options(flags.TASK_OPTIONS + flags.EXTRA_RESOURCES_OPTIONS +
                    flags.COMMON_OPTIONS)
@click.option('--mode',
              default=serve_lib.DEFAULT_UPDATE_MODE.value,
              type=click.Choice([m.value for m in serve_lib.UpdateMode],
                                case_sensitive=False),
              required=False,
              help=('Update mode. If "rolling", SkyServe will update the '
                    'service with rolling update. If "blue_green", SkyServe '
                    'will update the service with blue-green update. '))
@flags.yes_option()
@timeline.event
@usage_lib.entrypoint
def serve_update(
        service_name: str, service_yaml: Tuple[str,
                                               ...], workdir: Optional[str],
        infra: Optional[str], cloud: Optional[str], region: Optional[str],
        zone: Optional[str], num_nodes: Optional[int], use_spot: Optional[bool],
        image_id: Optional[str], env_file: Optional[Dict[str, str]],
        env: List[Tuple[str, str]], secret: List[Tuple[str, str]],
        gpus: Optional[str], instance_type: Optional[str], ports: Tuple[str],
        cpus: Optional[str], memory: Optional[str], disk_size: Optional[int],
        disk_tier: Optional[str], network_tier: Optional[str], mode: str,
        yes: bool, async_call: bool):
    """Update a SkyServe service.

    service_yaml must point to a valid YAML file.

    SkyServe will reuse old replicas, if only the service section is changed
    and no file mounts are specified.

    Otherwise, SkyServe will terminate the old replicas and start new replicas.

    Two update modes are supported:

    - "rolling": (default) SkyServe will update the service with rolling update,
        i.e., it will terminate one old replica whenever one new replica is
        ready. Traffic can be mixed on old and new replicas.
    - "blue_green": SkyServe will update the service with blue-green update,
        i.e., it will wait for new replicas to be ready and then terminate old
        replicas. Traffic will only be switched from old to new replicas after
        enough new replicas are ready.

    Example:

    .. code-block:: bash

        # Update an existing service with rolling update
        sky serve update sky-service-16aa new_service.yaml
        # Use blue-green update
        sky serve update --mode blue_green sky-service-16aa new_service.yaml

    """
    # TODO(lloyd-brown): Add a way to update number of replicas for serve
    # the way we did for pools.
    # Reconcile --infra with the individual --cloud/--region/--zone flags.
    cloud, region, zone = _handle_infra_cloud_region_zone_options(
        infra, cloud, region, zone)
    # Rebuild the task from the new YAML with CLI overrides; the task name is
    # forced to the existing service's name so the update targets it.
    task = _generate_task_with_service(
        service_name=service_name,
        service_yaml_args=service_yaml,
        workdir=workdir,
        cloud=cloud,
        region=region,
        zone=zone,
        gpus=gpus,
        cpus=cpus,
        memory=memory,
        instance_type=instance_type,
        num_nodes=num_nodes,
        use_spot=use_spot,
        image_id=image_id,
        env_file=env_file,
        env=env,
        secret=secret,
        disk_size=disk_size,
        disk_tier=disk_tier,
        network_tier=network_tier,
        ports=ports,
        not_supported_cmd='sky serve update',
        pool=False,
    )
    click.secho('Service spec:', fg='cyan')
    click.echo(task.service)
    serve_lib.validate_service_task(task, pool=False)

    click.secho('New replica will use the following resources (estimated):',
                fg='cyan')
    # NOTE(review): `dag` is not referenced after construction — presumably
    # entering the Dag context produces the resource estimate printed above;
    # confirm before simplifying.
    with dag_lib.Dag() as dag:
        dag.add(task)

    request_id = serve_lib.update(task,
                                  service_name,
                                  mode=serve_lib.UpdateMode(mode),
                                  _need_confirmation=not yes)
    _async_call_or_wait(request_id, async_call, 'sky.serve.update')


@serve.command('status', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@flags.verbose_option()
@click.option('--endpoint',
              default=False,
              is_flag=True,
              required=False,
              help='Show service endpoint.')
@click.argument('service_names', required=False, type=str, nargs=-1)
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def serve_status(verbose: bool, endpoint: bool, service_names: List[str]):
    """Show statuses of SkyServe services.

    Show detailed statuses of one or more services. If SERVICE_NAME is not
    provided, show all services' status. If --endpoint is specified, output
    the endpoint of the service only.

    Each service can have one of the following statuses:

    - ``CONTROLLER_INIT``: The controller is initializing.

    - ``REPLICA_INIT``: The controller has finished initializing, and there are
      no ready replicas for now. This also indicates that no replica failure
      has been detected.

    - ``CONTROLLER_FAILED``: The controller failed to start or is in an abnormal
      state; or the controller and load balancer processes are not alive.

    - ``READY``: The service is ready to serve requests. At least one replica is
      in READY state (i.e., has passed the readiness probe).

    - ``SHUTTING_DOWN``: The service is being shut down. This usually happens
      when the `sky serve down` command is called.

    - ``FAILED``: At least one replica failed and no replica is ready. This
      could be caused by several reasons:

      - The launching process of the replica failed.

      - No readiness probe passed within initial delay seconds.

      - The replica continuously failed after serving requests for a while.

      - User code failed.

    - ``FAILED_CLEANUP``: Some error occurred while the service was being shut
      down. This usually indicates resource leakages. If you see such status,
      please login to the cloud console and double-check

    - ``NO_REPLICAS``: The service has no replicas. This usually happens when
        min_replicas is set to 0 and there is no traffic to the system.

    Each replica can have one of the following statuses:

    - ``PENDING``: The maximum number of simultaneous launches has been reached
      and the replica launch process is pending.

    - ``PROVISIONING``: The replica is being provisioned.

    - ``STARTING``: Replica provisioning has succeeded and the replica is
      initializing, e.g., installing dependencies or loading model
      weights.

    - ``READY``: The replica is ready to serve requests (i.e., has passed the
      readiness probe).

    - ``NOT_READY``: The replica failed a readiness probe, but has not failed
      the probe for a continuous period of time (otherwise it'd be shut down).
      This usually happens when the replica is suffering from a bad network
      connection or there are too many requests overwhelming the replica.

    - ``SHUTTING_DOWN``: The replica is being shut down. This usually happens
      when the replica is being scaled down, some error occurred, or the
      `sky serve down` command is called. SkyServe will terminate all replicas
      that errored.

    - ``FAILED``: Some error occurred when the replica is serving requests.
      This indicates that the replica is already shut down. (Otherwise, it is
      ``SHUTTING_DOWN``.)

    - ``FAILED_CLEANUP``: Some error occurred while the replica was being shut
      down. This usually indicates resource leakages since the termination
      did not finish correctly. When seeing this status, please login to the
      cloud console and check whether there are some leaked VMs/resources.

    - ``PREEMPTED``: The replica was preempted by the cloud provider and sky
      serve is recovering this replica. This only happens when the replica is
      a spot instance.

    Examples:

    .. code-block:: bash

      # Show status for all services
      sky serve status
      \b
      # Show detailed status for all services
      sky serve status -v
      \b
      # Only show status of my-service
      sky serve status my-service
    """
    # An empty tuple of names means "query all services" downstream, which
    # is signalled by passing None.
    service_names_to_query: Optional[List[str]] = service_names
    if not service_names:
        service_names_to_query = None
    # This won't pollute the output of --endpoint.
    with rich_utils.client_status('[cyan]Checking services[/]'):
        service_status_request_id = serve_lib.status(service_names_to_query)
        _, msg = _handle_services_request(service_status_request_id,
                                          service_names=service_names_to_query,
                                          show_all=verbose,
                                          show_endpoint=endpoint,
                                          is_called_by_user=True)

    # With --endpoint, only the raw endpoint is printed (no header) so the
    # output stays machine-consumable.
    if not endpoint:
        click.echo(f'{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
                   f'Services{colorama.Style.RESET_ALL}')
    click.echo(msg)


@serve.command('down', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('service_names', required=False, type=str, nargs=-1)
@flags.all_option('Tear down all services.')
@click.option('--purge',
              '-p',
              default=False,
              is_flag=True,
              help='Tear down services in failed status.')
@flags.yes_option()
@click.option('--replica-id',
              default=None,
              type=int,
              help='Tear down a given replica')
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def serve_down(
    service_names: List[str],
    all: bool,
    purge: bool,
    yes: bool,
    replica_id: Optional[int],
    async_call: bool,
) -> None:
    """Teardown service(s).

    SERVICE_NAMES is the name of the service (or glob pattern) to tear down. If
    both SERVICE_NAMES and ``--all`` are supplied, the latter takes precedence.

    Tearing down a service will delete all of its replicas and associated
    resources.

    Example:

    .. code-block:: bash

        # Tear down a specific service.
        sky serve down my-service
        \b
        # Tear down multiple services.
        sky serve down my-service1 my-service2
        \b
        # Tear down all services matching glob pattern 'service-*'.
        sky serve down "service-*"
        \b
        # Tear down all existing services.
        sky serve down -a
        \b
        # Forcefully tear down a service in failed status.
        sky serve down failed-service --purge
        \b
        # Tear down a specific replica
        sky serve down my-service --replica-id 1
        \b
        # Forcefully tear down a specific replica, even in failed status.
        sky serve down my-service --replica-id 1 --purge
    """
    # Exactly one of SERVICE_NAMES and --all must be given.
    if sum([bool(service_names), all]) != 1:
        argument_str = (f'SERVICE_NAMES={",".join(service_names)}'
                        if service_names else '')
        argument_str += ' --all' if all else ''
        raise click.UsageError(
            'Can only specify one of SERVICE_NAMES or --all. '
            f'Provided {argument_str!r}.')

    replica_id_is_defined = replica_id is not None
    if replica_id_is_defined:
        # Check --all before the single-name check: with --all, service_names
        # is empty, so the length check would otherwise fire first and emit
        # the misleading "single service name" error, making this branch
        # unreachable.
        if all:
            raise click.UsageError('The --replica-id option cannot be used '
                                   'with the --all option.')
        if len(service_names) != 1:
            service_names_str = ', '.join(service_names)
            raise click.UsageError(f'The --replica-id option can only be used '
                                   f'with a single service name. Got: '
                                   f'{service_names_str}.')
    if not yes:
        # Ask for confirmation unless -y/--yes was passed.
        if replica_id_is_defined:
            click.confirm(
                f'Terminating replica ID {replica_id} in '
                f'{service_names[0]!r}. Proceed?',
                default=True,
                abort=True,
                show_default=True)
        else:
            quoted_service_names = [f'{name!r}' for name in service_names]
            list_service_str = ', '.join(quoted_service_names)
            service_identity_str = f'service(s) {list_service_str}'
            if all:
                service_identity_str = 'all services'
            click.confirm(f'Terminating {service_identity_str}. Proceed?',
                          default=True,
                          abort=True,
                          show_default=True)

    if replica_id_is_defined:
        assert replica_id is not None
        request_id = serve_lib.terminate_replica(service_names[0], replica_id,
                                                 purge)
    else:
        request_id = serve_lib.down(service_names=service_names,
                                    all=all,
                                    purge=purge)
    _async_call_or_wait(request_id, async_call, 'sky.serve.down')


@serve.command('logs', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.option(
    '--follow/--no-follow',
    is_flag=True,
    default=True,
    help=('Follow the logs of the job. [default: --follow] '
          'If --no-follow is specified, print the log so far and exit.'))
@click.option('--controller',
              is_flag=True,
              default=False,
              required=False,
              help='Show the controller logs of this service.')
@click.option('--load-balancer',
              is_flag=True,
              default=False,
              required=False,
              help='Show the load balancer logs of this service.')
@click.option('--sync-down',
              '-s',
              is_flag=True,
              default=False,
              help='Sync down logs to the local machine. Can be combined with '
              '--controller, --load-balancer, or a replica ID to narrow scope.')
@click.option(
    '--tail',
    default=None,
    type=int,
    help='The number of lines to display from the end of the log file. '
    'Default is None, which means print all lines.')
@click.argument('service_name', required=True, type=str)
@click.argument('replica_ids', required=False, type=int, nargs=-1)
@usage_lib.entrypoint
# TODO(tian): Add default argument for this CLI if none of the flags are
# specified.
def serve_logs(
    service_name: str,
    follow: bool,
    controller: bool,
    load_balancer: bool,
    replica_ids: Tuple[int, ...],
    sync_down: bool,
    tail: Optional[int],
):
    """Tail or sync down logs of a service.

    Logs can be tailed from one target (controller, load balancer, or a single
    replica) or synced down from multiple targets simultaneously.

    Example:

    .. code-block:: bash

        # Tail the controller logs of a service
        sky serve logs --controller [SERVICE_NAME]
        \b
        # Print the load balancer logs so far and exit
        sky serve logs --load-balancer --no-follow [SERVICE_NAME]
        \b
        # Tail the logs of replica 1
        sky serve logs [SERVICE_NAME] 1
        \b
        # Show the last 100 lines of the controller logs
        sky serve logs --controller --tail 100 [SERVICE_NAME]
        \b
        # Sync down all logs of the service (controller, LB, all replicas)
        sky serve logs [SERVICE_NAME] --sync-down
        \b
        # Sync down controller logs and logs for replicas 1 and 3
        sky serve logs [SERVICE_NAME] 1 3 --controller --sync-down
    """
    # Delegate all flag validation and log handling to the shared helper.
    # pool=False selects the `sky serve` flavor; the helper presumably also
    # backs a pool-oriented command elsewhere -- confirm at _handle_serve_logs.
    _handle_serve_logs(service_name,
                       follow=follow,
                       controller=controller,
                       load_balancer=load_balancer,
                       replica_ids=replica_ids,
                       sync_down=sync_down,
                       tail=tail,
                       pool=False)


@cli.group(cls=_NaturalOrderGroup, hidden=True)
def local():
    """SkyPilot local tools CLI."""
    # Hidden command group; its subcommands (`sky local up` / `sky local
    # down`) manage a local cluster (a kind cluster, per the --port-start
    # help text below).
    pass


@click.option('--gpus/--no-gpus',
              default=True,
              is_flag=True,
              help='Launch cluster without GPU support even '
              'if GPUs are detected on the host.')
@click.option(
    '--name',
    type=str,
    required=False,
    help='Name of the cluster. Defaults to "skypilot". Used without ip list.')
@click.option(
    '--port-start',
    type=int,
    required=False,
    help='Starting port range for the local kind cluster. Needs to be a '
    'multiple of 100. If not given, a random range will be used. '
    'Used without ip list.')
@local.command('up', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def local_up(gpus: bool, name: Optional[str], port_start: Optional[int],
             async_call: bool):
    """Creates a local cluster."""
    # Submit the request, then either print the request ID (--async) or
    # block until completion.
    request_id = sdk.local_up(gpus, name, port_start)
    # Fix: use the dotted 'sky.local.up' request name for consistency with
    # the sibling commands ('sky.local.down', 'sky.serve.down', ...); the
    # previous 'local up' broke the naming convention.
    _async_call_or_wait(request_id, async_call, request_name='sky.local.up')


@click.option('--name',
              type=str,
              required=False,
              help='Name of the cluster to down. Defaults to "skypilot".')
@local.command('down', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@_add_click_options(flags.COMMON_OPTIONS)
@usage_lib.entrypoint
def local_down(name: Optional[str], async_call: bool):
    """Deletes a local cluster."""
    # Submit the teardown request, then either print the request ID
    # (--async) or block until the request completes.
    _async_call_or_wait(sdk.local_down(name),
                        async_call,
                        request_name='sky.local.down')


@cli.group(cls=_NaturalOrderGroup)
def api():
    """SkyPilot API server commands."""
    # Command group only; the subcommands below (start/stop/logs/cancel/
    # status/login/logout/info) do the actual work.
    pass


@api.command('start', cls=_DocumentedCodeCommand)
@click.option('--deploy',
              type=bool,
              is_flag=True,
              default=False,
              required=False,
              help=('Deploy the SkyPilot API server. When set to True, '
                    'SkyPilot API server will use all resources on the host '
                    'machine assuming the machine is dedicated to SkyPilot API '
                    'server; host will also be set to 0.0.0.0 to allow remote '
                    'access.'))
@click.option('--host',
              default='127.0.0.1',
              # NOTE(review): the constant name has a typo ('AVAILBLE');
              # defined upstream in sky.server.common, so left untouched here.
              type=click.Choice(server_common.AVAILBLE_LOCAL_API_SERVER_HOSTS),
              required=False,
              help=('The host to deploy the SkyPilot API server. To allow '
                    'remote access, set this to 0.0.0.0'))
@click.option('--foreground',
              is_flag=True,
              default=False,
              required=False,
              help='Run the SkyPilot API server in the foreground and output '
              'its logs to stdout/stderr. Allowing external systems '
              'to manage the process lifecycle and collect logs directly. '
              'This is useful when the API server is managed by systems '
              'like systemd and Kubernetes.')
@click.option('--enable-basic-auth',
              is_flag=True,
              default=False,
              required=False,
              help='Enable basic authentication in the SkyPilot API server.')
@usage_lib.entrypoint
def api_start(deploy: bool, host: str, foreground: bool,
              enable_basic_auth: bool):
    """Starts the SkyPilot API server locally."""
    # Thin wrapper: all option values are forwarded unchanged to the SDK.
    sdk.api_start(deploy=deploy,
                  host=host,
                  foreground=foreground,
                  enable_basic_auth=enable_basic_auth)


@api.command('stop', cls=_DocumentedCodeCommand)
@usage_lib.entrypoint
def api_stop():
    """Stops the SkyPilot API server locally."""
    # No options: the SDK locates and stops the locally running server.
    sdk.api_stop()


@api.command('logs', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('request_id',
                required=False,
                type=str,
                **_get_shell_complete_args(_complete_api_request))
@click.option('--server-logs',
              is_flag=True,
              default=False,
              required=False,
              help='Stream the server logs.')
@click.option('--log-path',
              '-l',
              required=False,
              type=str,
              help='The path to the log file to stream.')
@click.option('--tail',
              required=False,
              type=int,
              help=('Number of lines to show from the end of the logs. '
                    '(default: None)'))
@click.option('--follow/--no-follow',
              is_flag=True,
              default=True,
              required=False,
              help='Follow the logs.')
@usage_lib.entrypoint
def api_logs(request_id: Optional[str], server_logs: bool,
             log_path: Optional[str], tail: Optional[int], follow: bool):
    """Stream the logs of a request running on SkyPilot API server."""
    if server_logs:
        # --server-logs streams the API server's own logs; any request ID or
        # log path is irrelevant in this mode.
        sdk.api_server_logs(follow=follow, tail=tail)
        return

    # Without --server-logs we need exactly one source to stream from.
    if request_id is None and log_path is None:
        # TODO(zhwu): get the latest request ID.
        raise click.BadParameter('Please provide the request ID or log path.')
    if request_id is not None and log_path is not None:
        raise click.BadParameter(
            'Only one of request ID and log path can be provided.')

    # Only wrap request_id when it is provided; otherwise pass None so the
    # server accepts log_path-only streaming.
    req_id = None
    if request_id is not None:
        req_id = server_common.RequestId[None](request_id)
    sdk.stream_and_get(req_id, log_path, tail, follow)


@api.command('cancel', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('request_ids',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_api_request))
@flags.all_option('Cancel all your requests.')
@flags.all_users_option('Cancel all requests from all users.')
@flags.yes_option()
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def api_cancel(request_ids: Optional[List[str]], all: bool, all_users: bool,
               yes: bool):
    """Cancel a request running on SkyPilot API server."""
    if all or all_users:
        # Bulk cancellation is destructive; require the user to type an
        # exact confirmation phrase unless --yes was given.
        if not yes:
            keyword = 'ALL USERS\'' if all_users else 'YOUR'
            user_input = click.prompt(
                f'This will cancel all {keyword} requests.\n'
                f'To proceed, please type {colorama.Style.BRIGHT}'
                f'\'cancel all requests\'{colorama.Style.RESET_ALL}',
                type=str)
            if user_input != 'cancel all requests':
                raise click.Abort()
        # A None request_ids selects everything on the server side.
        request_ids = None
    cancelled = sdk.get(
        sdk.api_cancel(request_ids=request_ids, all_users=all_users))
    num_cancelled = len(cancelled) if cancelled else 0
    if num_cancelled == 0:
        click.secho('No requests need to be cancelled.', fg='green')
    elif num_cancelled == 1:
        click.secho(f'Cancelled 1 request: {cancelled[0]}', fg='green')
    else:
        click.secho(f'Cancelled {num_cancelled} requests.', fg='green')


class IntOrNone(click.ParamType):
    """Click param type accepting an integer, or 'none'/'all' meaning None."""
    name = 'int-or-none'

    def convert(self, value, param, ctx):
        # Defaults may already arrive as ints; pass them through untouched.
        if isinstance(value, int):
            return value
        if isinstance(value, str):
            # 'none' / 'all' (case-insensitive) map to None ("no limit").
            if value.lower() in ('none', 'all'):
                return None
        try:
            return int(value)
        except ValueError:
            # ParamType.fail raises a UsageError with a formatted message.
            self.fail(f'{value!r} is not a valid integer or "none" or "all"',
                      param, ctx)


INT_OR_NONE = IntOrNone()


@api.command('status', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.argument('request_id_prefixes',
                required=False,
                type=str,
                nargs=-1,
                **_get_shell_complete_args(_complete_api_request))
@click.option('--all-status',
              '-a',
              is_flag=True,
              default=False,
              required=False,
              help=('Show requests of all statuses, including finished ones '
                    '(SUCCEEDED, FAILED, CANCELLED). By default, only active '
                    'requests (PENDING, RUNNING) are shown.'))
@click.option(
    '--limit',
    '-l',
    default=_NUM_REQUESTS_TO_SHOW,
    type=INT_OR_NONE,
    required=False,
    help=(f'Number of requests to show, default is {_NUM_REQUESTS_TO_SHOW},'
          f' set to "none" or "all" to show all requests.'))
@flags.verbose_option('Show more details.')
@usage_lib.entrypoint
# pylint: disable=redefined-builtin
def api_status(request_id_prefixes: Optional[List[str]], all_status: bool,
               verbose: bool, limit: Optional[int]):
    """List requests on SkyPilot API server."""
    # An empty tuple of prefixes means "no filter" server-side.
    if not request_id_prefixes:
        request_id_prefixes = None
    fields = (_VERBOSE_REQUEST_FIELDS_TO_SHOW
              if verbose else _DEFAULT_REQUEST_FIELDS_TO_SHOW)
    request_list = sdk.api_status(request_id_prefixes, all_status, limit,
                                  fields)
    # The 'Cluster' column only appears in verbose mode.
    if verbose:
        columns = ['ID', 'User', 'Name', 'Cluster', 'Created', 'Status']
    else:
        columns = ['ID', 'User', 'Name', 'Created', 'Status']
    table = log_utils.create_table(columns)
    if not request_list:
        # Placeholder row so the table is not empty (e.g. API server down).
        table.add_row(['-'] * len(columns))
    else:
        for request in request_list:
            r_id = request.request_id
            if not verbose:
                r_id = common_utils.truncate_long_string(r_id, 36)
            row = [r_id, request.user_name, request.name]
            if verbose:
                row.append(request.cluster_name)
            row.append(log_utils.readable_time_duration(request.created_at))
            row.append(requests.RequestStatus(request.status).colored_str())
            table.add_row(row)
    click.echo(table)
    # Hint that the list was truncated by --limit.
    if limit and len(request_list) >= limit:
        click.echo()
        click.echo(
            f'Showing {limit} requests. Use "-l none" or "-l all" to show'
            f' all requests.')


@api.command('login', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@click.option('--endpoint',
              '-e',
              required=False,
              help='The SkyPilot API server endpoint.')
@click.option('--relogin',
              is_flag=True,
              default=False,
              help='Force relogin with OAuth2 when enabled.')
@click.option(
    '--service-account-token',
    '--token',
    '-t',
    required=False,
    help='Service account token for authentication (starts with ``sky_``).')
@usage_lib.entrypoint
def api_login(endpoint: Optional[str], relogin: bool,
              service_account_token: Optional[str]):
    """Logs into a SkyPilot API server.

    If your remote API server has enabled OAuth2 authentication, you can use
    one of the following methods to login:

    1. OAuth2 browser-based authentication (default)

    2. Service account token via ``--token`` flag

    3. Service account token in ``~/.sky/config.yaml``

    Examples:

    .. code-block:: bash

      # OAuth2 browser login
      sky api login -e https://api.example.com
      \b
      # Service account token login
      sky api login -e https://api.example.com --token sky_abc123...

    """
    # All auth-method selection happens inside the SDK; this command just
    # forwards the option values.
    sdk.api_login(endpoint, relogin, service_account_token)


@api.command('logout', cls=_DocumentedCodeCommand)
# Consistency fix: every other `sky api` subcommand in this group is wrapped
# with @usage_lib.entrypoint; the omission here looked accidental.
@usage_lib.entrypoint
def api_logout():
    """Logs out of the api server"""
    # Credential/session teardown is handled entirely by the SDK.
    sdk.api_logout()


@api.command('info', cls=_DocumentedCodeCommand)
@flags.config_option(expose_value=False)
@usage_lib.entrypoint
def api_info():
    """Shows the SkyPilot API server URL."""
    url = server_common.get_server_url()
    api_server_info = sdk.api_info()
    # Prefer the identity reported by the server; fall back to the local
    # current user when the server did not report one.
    user = api_server_info.user
    if user is None:
        user = models.User.get_current_user()
    # Print client version and commit.
    click.echo(f'SkyPilot client version: {sky.__version__}, '
               f'commit: {sky.__commit__}')

    config = dict(skypilot_config.get_user_config())

    # Determine where the endpoint is set: env var beats config file beats
    # the default local server.
    location = 'Endpoint set to default local API server.'
    if constants.SKY_API_SERVER_URL_ENV_VAR in os.environ:
        location = ('Endpoint set via the environment variable '
                    f'{constants.SKY_API_SERVER_URL_ENV_VAR}')
    elif 'endpoint' in config.get('api_server', {}):
        config_path = skypilot_config.resolve_user_config_path()
        if config_path is not None:
            location = f'Endpoint set via {config_path}'
    click.echo(f'Using SkyPilot API server and dashboard: {url}\n'
               f'{ux_utils.INDENT_SYMBOL}Status: {api_server_info.status}, '
               f'commit: {api_server_info.commit}, '
               f'version: {api_server_info.version}\n'
               f'{ux_utils.INDENT_SYMBOL}User: {user.name} ({user.id})\n'
               f'{ux_utils.INDENT_LAST_SYMBOL}{location}')


@cli.group(cls=_NaturalOrderGroup)
def ssh():
    """Commands for managing SSH Node Pools."""
    # Command group only; `sky ssh up` / `sky ssh down` below do the work.
    pass


@ssh.command('up', cls=_DocumentedCodeCommand)
@click.option(
    '--infra',
    help='Name of the cluster to set up in ~/.sky/ssh_node_pools.yaml. '
    'If not specified, all clusters in the file will be set up.')
@click.option('--async',
              'async_call',
              is_flag=True,
              hidden=True,
              help='Run the command asynchronously.')
@click.option('--file',
              '-f',
              required=False,
              help='The file containing the SSH targets.')
def ssh_up(infra: Optional[str], async_call: bool, file: Optional[str]):
    """Set up a cluster using SSH targets from a file. If not specified,
    ~/.sky/ssh_node_pools.yaml will be used.

    This command sets up a Kubernetes cluster on the machines specified in the
    config file and configures SkyPilot to use it.
    """
    request_id = sdk.ssh_up(infra=infra, file=file)
    # Synchronous by default: stream logs until the request finishes. With
    # the hidden --async flag, just report the request ID and return.
    if not async_call:
        sdk.stream_and_get(request_id)
        return
    print(f'Request submitted with ID: {request_id}')


@ssh.command('down', cls=_DocumentedCodeCommand)
@click.option(
    '--infra',
    help='Name of the cluster to clean up in ~/.sky/ssh_node_pools.yaml. '
    'If not specified, all clusters in the file will be cleaned up.')
@click.option('--async',
              'async_call',
              is_flag=True,
              hidden=True,
              help='Run the command asynchronously.')
def ssh_down(infra: Optional[str], async_call: bool):
    """Clean up a cluster set up with 'sky ssh up'.

    This command removes the Kubernetes installation from the machines specified
    in ~/.sky/ssh_node_pools.yaml.
    """
    request_id = sdk.ssh_down(infra=infra)
    if async_call:
        # Hidden --async flag: report the request ID and return immediately.
        print(f'Request submitted with ID: {request_id}')
    else:
        # Default: stream logs until the teardown request finishes.
        sdk.stream_and_get(request_id)


def main():
    # Console-script entry point: delegate straight to the click command
    # group defined in this module.
    return cli()


if __name__ == '__main__':
    main()
