"""ReplicaManager: handles the creation and deletion of endpoint replicas."""
import dataclasses
import functools
from multiprocessing import pool as mp_pool
import os
import pathlib
import threading
import time
import traceback
import typing
from typing import Any, Dict, List, Optional, Tuple

import colorama
import filelock
import requests

from sky import backends
from sky import exceptions
from sky import global_user_state
from sky import sky_logging
from sky import task as task_lib
from sky.backends import backend_utils
from sky.client import sdk
from sky.serve import constants as serve_constants
from sky.serve import serve_state
from sky.serve import serve_utils
from sky.serve import service
from sky.serve import spot_placer
from sky.skylet import constants
from sky.skylet import job_lib
from sky.usage import usage_lib
from sky.utils import common_utils
from sky.utils import context
from sky.utils import controller_utils
from sky.utils import env_options
from sky.utils import resources_utils
from sky.utils import status_lib
from sky.utils import thread_utils
from sky.utils import ux_utils
from sky.utils import yaml_utils

if typing.TYPE_CHECKING:
    import logging

    from sky.serve import service_spec

logger = sky_logging.init_logger(__name__)

# Polling / retry intervals. The two *_SECONDS names make the unit explicit;
# the *_INTERVAL values are presumably seconds as well — confirm at use sites
# (their consumers are outside this chunk).
_JOB_STATUS_FETCH_INTERVAL = 30
_PROCESS_POOL_REFRESH_INTERVAL = 20
# Seed for the exponential backoff between launch retries; passed to
# common_utils.Backoff in launch_cluster.
_RETRY_INIT_GAP_SECONDS = 60
_DEFAULT_DRAIN_SECONDS = 120
_WAIT_LAUNCH_THREAD_TIMEOUT_SECONDS = 15

# TODO(tian): Backward compatibility. Remove this after 3 minor release, i.e.
# 0.13.0. We move the ProcessStatus to common_utils.ProcessStatus in #6666, but
# old ReplicaInfo in database will still tries to unpickle using ProcessStatus
# in replica_managers. We set this alias to avoid breaking changes. See #6729
# for more details.
ProcessStatus = common_utils.ProcessStatus


# TODO(tian): Combine this with
# sky/spot/recovery_strategy.py::StrategyExecutor::launch
# Use context.contextual to enable per-launch output redirection.
@context.contextual
def launch_cluster(replica_id: int,
                   yaml_content: str,
                   cluster_name: str,
                   log_file: str,
                   replica_to_request_id: thread_utils.ThreadSafeDict[int, str],
                   replica_to_launch_cancelled: thread_utils.ThreadSafeDict[
                       int, bool],
                   resources_override: Optional[Dict[str, Any]] = None,
                   retry_until_up: bool = True,
                   max_retry: int = 3) -> None:
    """Launch a sky serve replica cluster.

    This function will not wait for the job to start running. It will return
    immediately after the job is submitted.

    Args:
        replica_id: ID of the replica being launched.
        yaml_content: YAML string of the task to launch.
        cluster_name: Name of the replica cluster to create.
        log_file: Path that all output of this launch is redirected to.
        replica_to_request_id: Shared mapping used to publish the sdk launch
            request id of this replica so other threads can cancel it.
        replica_to_launch_cancelled: Shared mapping through which other
            threads signal that this launch should be aborted.
        resources_override: Optional overrides applied on top of each
            resources entry in the task YAML (same format as the resources
            section in a SkyPilot task yaml).
        retry_until_up: Forwarded to sdk.launch.
        max_retry: Maximum number of launch attempts before giving up.

    Raises:
        RuntimeError: If failed to launch the cluster after max_retry retries,
            or some error happened before provisioning and will happen again
            if retry.
    """
    # Redirect all output of this launch to the replica's own log file.
    ctx = context.get()
    assert ctx is not None, 'Context is not initialized'
    ctx.redirect_log(pathlib.Path(log_file))

    if resources_override is not None:
        logger.info(f'Scaling up replica (id: {replica_id}) cluster '
                    f'{cluster_name} with resources override: '
                    f'{resources_override}')
    try:
        task = task_lib.Task.from_yaml_str(yaml_content)
        if resources_override is not None:
            resources = task.resources
            overrided_resources = [
                r.copy(**resources_override) for r in resources
            ]
            task.set_resources(type(resources)(overrided_resources))
        task.update_envs({serve_constants.REPLICA_ID_ENV_VAR: str(replica_id)})

        logger.info(f'Launching replica (id: {replica_id}) cluster '
                    f'{cluster_name} with resources: {task.resources}')
    except Exception as e:  # pylint: disable=broad-except
        logger.error('Failed to construct task object from yaml file with '
                     f'error {common_utils.format_exception(e)}')
        raise RuntimeError(
            f'Failed to launch the sky serve replica cluster {cluster_name} '
            'due to failing to initialize sky.Task from yaml file.') from e

    def _check_is_cancelled() -> bool:
        """Return True (and consume the signal) if this launch is cancelled."""
        is_cancelled = replica_to_launch_cancelled.get(replica_id, False)
        if is_cancelled:
            logger.info(f'Replica {replica_id} launch cancelled.')
            # Pop the value to indicate that the signal was received.
            replica_to_launch_cancelled.pop(replica_id)
        return is_cancelled

    retry_cnt = 0
    backoff = common_utils.Backoff(_RETRY_INIT_GAP_SECONDS)
    while True:
        retry_cnt += 1
        try:
            if _check_is_cancelled():
                return
            usage_lib.messages.usage.set_internal()
            request_id = sdk.launch(task,
                                    cluster_name,
                                    retry_until_up=retry_until_up,
                                    _is_launched_by_sky_serve_controller=True)
            logger.info(f'Replica cluster {cluster_name} launch requested '
                        f'with request_id: {request_id}.')
            replica_to_request_id[replica_id] = request_id
            sdk.stream_and_get(request_id)
            logger.info(f'Replica cluster {cluster_name} launched.')
        except (exceptions.InvalidClusterNameError,
                exceptions.NoCloudAccessError,
                exceptions.ResourcesMismatchError) as e:
            # These failures happen before provisioning and would recur on
            # retry, so fail fast.
            logger.error('Failure happened before provisioning. '
                         f'{common_utils.format_exception(e)}')
            raise RuntimeError('Failed to launch the sky serve replica '
                               f'cluster {cluster_name}.') from e
        except exceptions.ResourcesUnavailableError as e:
            if not any(
                    isinstance(err, exceptions.ResourcesUnavailableError)
                    for err in e.failover_history):
                # No resource-availability error in the failover history, so
                # retrying will not help.
                raise RuntimeError('Failed to launch the sky serve replica '
                                   f'cluster {cluster_name}.') from e
            logger.info('Failed to launch the sky serve replica cluster with '
                        f'error: {common_utils.format_exception(e)}')
        except Exception as e:  # pylint: disable=broad-except
            logger.info('Failed to launch the sky serve replica cluster with '
                        f'error: {common_utils.format_exception(e)}')
            with ux_utils.enable_traceback():
                logger.info(f'  Traceback: {traceback.format_exc()}')
        else:  # No exception, the launch succeeds.
            return

        # Cleanup the request id and the failed cluster. Pass a default since
        # the request id may never have been recorded if sdk.launch itself
        # raised before returning a request id.
        replica_to_request_id.pop(replica_id, None)
        # If it is cancelled, no need to terminate the cluster. It will be
        # handled by the termination thread.
        if _check_is_cancelled():
            return
        terminate_cluster(cluster_name, log_file=log_file)

        if retry_cnt >= max_retry:
            raise RuntimeError('Failed to launch the sky serve replica cluster '
                               f'{cluster_name} after {max_retry} retries.')

        gap_seconds = backoff.current_backoff()
        logger.info('Retrying to launch the sky serve replica cluster '
                    f'in {gap_seconds:.1f} seconds.')
        start_backoff = time.time()
        # Check if it is cancelled every 0.1 seconds.
        while time.time() - start_backoff < gap_seconds:
            if _check_is_cancelled():
                return
            time.sleep(0.1)


# TODO(tian): Combine this with
# sky/spot/recovery_strategy.py::terminate_cluster
@context.contextual
def terminate_cluster(cluster_name: str,
                      log_file: str,
                      replica_drain_delay_seconds: int = 0,
                      max_retry: int = 3) -> None:
    """Terminate the sky serve replica cluster."""
    # Redirect all output of this termination to the replica's log file.
    ctx = context.get()
    assert ctx is not None, 'Context is not initialized'
    ctx.redirect_log(pathlib.Path(log_file))

    logger.info(f'Terminating replica cluster {cluster_name} with '
                f'replica_drain_delay_seconds: {replica_drain_delay_seconds}')
    # Let in-flight requests drain before tearing the cluster down.
    time.sleep(replica_drain_delay_seconds)
    backoff = common_utils.Backoff()
    for attempt in range(1, max_retry + 1):
        try:
            usage_lib.messages.usage.set_internal()
            logger.info(f'Sending down request to cluster {cluster_name}')
            down_request_id = sdk.down(cluster_name)
            sdk.stream_and_get(down_request_id)
            logger.info(f'Replica cluster {cluster_name} terminated.')
            return
        except ValueError:
            # The cluster is already terminated.
            logger.info(
                f'Replica cluster {cluster_name} is already terminated.')
            return
        except Exception as e:  # pylint: disable=broad-except
            if attempt >= max_retry:
                raise RuntimeError('Failed to terminate the sky serve replica '
                                   f'cluster {cluster_name}.') from e
            gap_seconds = backoff.current_backoff()
            logger.error(
                'Failed to terminate the sky serve replica cluster '
                f'{cluster_name}. Retrying after {gap_seconds} seconds.'
                f'Details: {common_utils.format_exception(e)}')
            logger.error(f'  Traceback: {traceback.format_exc()}')
            time.sleep(gap_seconds)


def _get_resources_ports(yaml_content: str) -> str:
    """Get the resources ports used by the task."""
    service_task = task_lib.Task.from_yaml_str(yaml_content)
    # All ports were already validated in sky.serve.core.up.
    assert service_task.resources, service_task
    service_spec = service_task.service
    assert service_spec is not None, service_task
    if service_spec.pool:
        # Pool replicas expose no endpoint; use the placeholder value.
        return '-'
    assert service_spec.ports is not None, service_task
    return service_spec.ports


def _should_use_spot(yaml_content: str,
                     resource_override: Optional[Dict[str, Any]]) -> bool:
    """Get whether the task should use spot."""
    if resource_override is not None:
        use_spot_override = resource_override.get('use_spot')
        if use_spot_override is not None:
            assert isinstance(use_spot_override, bool)
            return use_spot_override
    task = task_lib.Task.from_yaml_str(yaml_content)
    spot_use_resources = [
        resources for resources in task.resources if resources.use_spot
    ]
    # Either resources all use spot or none use spot.
    assert len(spot_use_resources) in [0, len(task.resources)]
    return len(spot_use_resources) == len(task.resources)


# Every function that calls serve_state.add_or_update_replica should acquire
# this lock. It is to prevent race condition when the replica status is updated
# by multiple threads at the same time. The modification of replica info is
# 2 database calls: read the whole replica info object, unpickle it, and modify
# corresponding fields. Then it is write back to the database. We need to ensure
# the read-modify-write operation is atomic.
def with_lock(func):
    """Decorator that runs ``func`` while holding ``self.lock``.

    Applied to methods whose read-unpickle-modify-write sequence against the
    replica table must be atomic with respect to the manager's other threads.
    """

    @functools.wraps(func)
    def _synchronized(self, *args, **kwargs):
        with self.lock:
            return func(self, *args, **kwargs)

    return _synchronized


@dataclasses.dataclass
class ReplicaStatusProperty:
    """Some properties that determine replica status.

    Attributes:
        sky_launch_status: Process status of sky.launch.
        user_app_failed: Whether the service job failed.
        service_ready_now: Latest readiness probe result.
        first_ready_time: The first time the service is ready.
        sky_down_status: Process status of sky.down.
        is_scale_down: Whether the termination is caused by the autoscaler's
            decision.
        preempted: Whether the replica's spot instance was preempted.
        purged: Whether the replica is purged.
        failed_spot_availability: Whether the replica failed to launch due to
            spot availability.
    """
    # sky.launch will always be scheduled on creation of ReplicaStatusProperty.
    sky_launch_status: common_utils.ProcessStatus = (
        common_utils.ProcessStatus.SCHEDULED)
    user_app_failed: bool = False
    service_ready_now: bool = False
    # None means readiness probe is not succeeded yet;
    # -1 means the initial delay seconds is exceeded.
    first_ready_time: Optional[float] = None
    # None means sky.down is not called yet.
    sky_down_status: Optional[common_utils.ProcessStatus] = None
    # Whether the termination is caused by autoscaler's decision
    is_scale_down: bool = False
    # The replica's spot instance was preempted.
    preempted: bool = False
    # Whether the replica is purged.
    purged: bool = False
    # Whether the replica failed to launch due to spot availability.
    # This is only possible when spot placer is enabled, so the retry until up
    # is set to True and it can fail immediately due to spot availability.
    failed_spot_availability: bool = False

    def unrecoverable_failure(self) -> bool:
        """Whether the replica fails and cannot be recovered.

        Autoscaler should stop scaling if any of the replica has unrecoverable
        failure, e.g., the user app fails before the service endpoint being
        ready for the current version.
        """
        replica_status = self.to_replica_status()
        logger.info(
            'Check replica unrecoverable: first_ready_time '
            f'{self.first_ready_time}, user_app_failed {self.user_app_failed}, '
            f'status {replica_status}')
        if replica_status not in serve_state.ReplicaStatus.terminal_statuses():
            return False
        if self.first_ready_time is not None:
            if self.first_ready_time >= 0:
                # If the service is ever up, we assume there is no bug in the
                # user code and the scale down is successful, thus enabling the
                # controller to remove the replica from the replica table and
                # auto restart the replica.
                # For replica with a failed sky.launch, it is likely due to some
                # misconfigured resources, so we don't want to auto restart it.
                # For replica with a failed sky.down, we cannot restart it since
                # otherwise we will have a resource leak.
                return False
            else:
                # If the initial delay exceeded, it is likely the service is not
                # recoverable.
                return True
        if self.user_app_failed:
            return True
        # TODO(zhwu): launch failures not related to resource unavailability
        # should be considered as unrecoverable failure. (refer to
        # `spot.recovery_strategy.StrategyExecutor::_launch`)
        return False

    def should_track_service_status(self) -> bool:
        """Should we track the status of the replica.

        This includes:
            (1) Job status;
            (2) Readiness probe.
        """
        if self.sky_launch_status != common_utils.ProcessStatus.SUCCEEDED:
            return False
        if self.sky_down_status is not None:
            return False
        if self.user_app_failed:
            return False
        if self.preempted:
            return False
        if self.purged:
            return False
        return True

    def to_replica_status(self) -> serve_state.ReplicaStatus:
        """Convert status property to human-readable replica status."""
        # Backward compatibility. Before we introduce ProcessStatus.SCHEDULED,
        # we use None to represent sky.launch is not called yet.
        if (self.sky_launch_status is None or
                self.sky_launch_status == common_utils.ProcessStatus.SCHEDULED):
            # Pending to launch
            return serve_state.ReplicaStatus.PENDING
        if self.sky_launch_status == common_utils.ProcessStatus.RUNNING:
            if self.sky_down_status == common_utils.ProcessStatus.FAILED:
                return serve_state.ReplicaStatus.FAILED_CLEANUP
            if self.sky_down_status == common_utils.ProcessStatus.SUCCEEDED:
                # This indicate it is a scale_down with correct teardown.
                # Should have been cleaned from the replica table.
                return serve_state.ReplicaStatus.UNKNOWN
            # Still launching
            return serve_state.ReplicaStatus.PROVISIONING
        if self.sky_launch_status == common_utils.ProcessStatus.INTERRUPTED:
            # sky.down is running and a scale down interrupted sky.launch
            return serve_state.ReplicaStatus.SHUTTING_DOWN
        # From here on, sky_launch_status can only be FAILED or SUCCEEDED:
        # the None/SCHEDULED, RUNNING and INTERRUPTED cases all returned above
        # (a redundant INTERRUPTED re-check inside the branch below has been
        # removed as unreachable).
        if self.sky_down_status is not None:
            if self.preempted:
                # Replica (spot) is preempted
                return serve_state.ReplicaStatus.PREEMPTED
            if self.sky_down_status == common_utils.ProcessStatus.SCHEDULED:
                # sky.down is scheduled to run, but not started yet.
                return serve_state.ReplicaStatus.SHUTTING_DOWN
            if self.sky_down_status == common_utils.ProcessStatus.RUNNING:
                # sky.down is running
                return serve_state.ReplicaStatus.SHUTTING_DOWN
            if self.sky_down_status == common_utils.ProcessStatus.FAILED:
                # sky.down failed
                return serve_state.ReplicaStatus.FAILED_CLEANUP
            if self.user_app_failed:
                # Failed on user setup/run
                return serve_state.ReplicaStatus.FAILED
            if self.sky_launch_status == common_utils.ProcessStatus.FAILED:
                # sky.launch failed
                return serve_state.ReplicaStatus.FAILED_PROVISION
            if self.first_ready_time is None:
                # readiness probe is not executed yet, but a scale down is
                # triggered.
                return serve_state.ReplicaStatus.SHUTTING_DOWN
            if self.first_ready_time == -1:
                # initial delay seconds exceeded
                return serve_state.ReplicaStatus.FAILED_INITIAL_DELAY
            if not self.service_ready_now:
                # Max continuous failure exceeded
                return serve_state.ReplicaStatus.FAILED_PROBING
            # This indicate it is a scale_down with correct teardown.
            # Should have been cleaned from the replica table.
            return serve_state.ReplicaStatus.UNKNOWN
        if self.sky_launch_status == common_utils.ProcessStatus.FAILED:
            # sky.launch failed
            # The down thread has not been started if it reaches here,
            # due to the `if self.sky_down_status is not None`` check above.
            # However, it should have been started by _refresh_thread_pool.
            # If not started, this means some bug prevent sky.down from
            # executing. It is also a potential resource leak, so we mark
            # it as FAILED_CLEANUP.
            return serve_state.ReplicaStatus.FAILED_CLEANUP
        if self.user_app_failed:
            # Failed on user setup/run
            # Same as above, the down thread should have been started.
            return serve_state.ReplicaStatus.FAILED_CLEANUP
        if self.service_ready_now:
            # Service is ready
            return serve_state.ReplicaStatus.READY
        if self.first_ready_time is not None and self.first_ready_time >= 0.0:
            # Service was ready before but not now
            return serve_state.ReplicaStatus.NOT_READY
        else:
            # No readiness probe passed and sky.launch finished
            return serve_state.ReplicaStatus.STARTING


class ReplicaInfo:
    """Replica info for each replica.

    Instances are pickled into the replica table, so any new attribute must
    bump _VERSION and be backfilled in __setstate__.
    """

    # Pickle schema version of this class; see __setstate__ for migrations.
    _VERSION = 2

    def __init__(self, replica_id: int, cluster_name: str, replica_port: str,
                 is_spot: bool, location: Optional[spot_placer.Location],
                 version: int, resources_override: Optional[Dict[str,
                                                                 Any]]) -> None:
        self._version = self._VERSION
        self.replica_id: int = replica_id
        self.cluster_name: str = cluster_name
        self.version: int = version
        # Port the replica serves on; '-' for pool replicas (no endpoint,
        # see the `url` property).
        self.replica_port: str = replica_port
        # Readiness-prober bookkeeping; not mutated within this class.
        self.first_not_ready_time: Optional[float] = None
        self.consecutive_failure_times: List[float] = []
        self.status_property: ReplicaStatusProperty = ReplicaStatusProperty()
        self.is_spot: bool = is_spot
        # Stored in pickleable dict form so the whole object can be pickled
        # into the replica table; see get_spot_location for the inverse.
        self.location: Optional[Dict[str, Optional[str]]] = (
            location.to_pickleable() if location is not None else None)
        self.resources_override: Optional[Dict[str, Any]] = resources_override

    def get_spot_location(self) -> Optional[spot_placer.Location]:
        """Rebuild the spot placement location from its pickled form."""
        return spot_placer.Location.from_pickleable(self.location)

    def handle(
        self,
        cluster_record: Optional[Dict[str, Any]] = None
    ) -> Optional[backends.CloudVmRayResourceHandle]:
        """Get the handle of the cluster.

        Args:
            cluster_record: The cluster record in the cluster table. If not
                provided, will fetch the cluster record from the cluster table
                based on the cluster name.

        Returns:
            The cluster handle, or None if no handle is available.
        """
        if cluster_record is None:
            handle = global_user_state.get_handle_from_cluster_name(
                self.cluster_name)
        else:
            handle = cluster_record['handle']
        if handle is None:
            return None
        assert isinstance(handle, backends.CloudVmRayResourceHandle)
        return handle

    @property
    def is_terminal(self) -> bool:
        """Whether the replica is in a terminal status."""
        return self.status in serve_state.ReplicaStatus.terminal_statuses()

    @property
    def is_ready(self) -> bool:
        """Whether the replica is READY."""
        return self.status == serve_state.ReplicaStatus.READY

    @property
    def url(self) -> Optional[str]:
        """The replica's endpoint URL, or None if it cannot be determined."""
        handle = self.handle()
        if handle is None:
            return None
        if self.replica_port == '-':
            # This is a pool replica so there is no endpoint and it's filled
            # with this dummy value. We return None here so that we can
            # get the active ready replicas and perform autoscaling. Otherwise,
            # would error out when trying to get the endpoint.
            return None
        replica_port_int = int(self.replica_port)
        try:
            endpoint_dict = backend_utils.get_endpoints(handle.cluster_name,
                                                        replica_port_int)
        except exceptions.ClusterNotUpError:
            return None
        endpoint = endpoint_dict.get(replica_port_int, None)
        if not endpoint:
            return None
        assert isinstance(endpoint, str), endpoint
        # If replica doesn't start with http or https, add http://
        if not endpoint.startswith('http'):
            endpoint = 'http://' + endpoint
        return endpoint

    @property
    def status(self) -> serve_state.ReplicaStatus:
        """The replica status derived from its status property."""
        replica_status = self.status_property.to_replica_status()
        if replica_status == serve_state.ReplicaStatus.UNKNOWN:
            # UNKNOWN replicas should already have been cleaned from the
            # replica table; seeing one here indicates a bug.
            logger.error('Detecting UNKNOWN replica status for '
                         f'replica {self.replica_id}.')
        return replica_status

    def to_info_dict(self,
                     with_handle: bool,
                     with_url: bool = True) -> Dict[str, Any]:
        """Serialize the replica into a plain dict.

        Args:
            with_handle: Also include the cluster handle (plus cloud, region
                and a readable resources string when the handle exists).
            with_url: Include the endpoint; the `url` property may perform a
                network lookup, so callers can opt out.
        """
        cluster_record = global_user_state.get_cluster_from_name(
            self.cluster_name, include_user_info=False, summary_response=True)
        info_dict = {
            'replica_id': self.replica_id,
            'name': self.cluster_name,
            'status': self.status,
            'version': self.version,
            'endpoint': self.url if with_url else None,
            'is_spot': self.is_spot,
            'launched_at': (cluster_record['launched_at']
                            if cluster_record is not None else None),
        }
        if with_handle:
            handle = self.handle(cluster_record)
            info_dict['handle'] = handle
            if handle is not None:
                info_dict['cloud'] = repr(handle.launched_resources.cloud)
                info_dict['region'] = handle.launched_resources.region
                info_dict['resources_str'] = (
                    resources_utils.get_readable_resources_repr(
                        handle, simplified_only=True)[0])
        return info_dict

    def __repr__(self) -> str:
        # Only fetch handle/url (which may hit the network) when debug
        # output is enabled.
        show_details = env_options.Options.SHOW_DEBUG_INFO.get()
        info_dict = self.to_info_dict(with_handle=show_details,
                                      with_url=show_details)
        handle_str = ''
        if 'handle' in info_dict:
            handle_str = f', handle={info_dict["handle"]}'
        info = (f'ReplicaInfo(replica_id={self.replica_id}, '
                f'cluster_name={self.cluster_name}, '
                f'version={self.version}, '
                f'replica_port={self.replica_port}, '
                f'is_spot={self.is_spot}, '
                f'location={self.location}, '
                f'status={self.status}, '
                f'launched_at={info_dict["launched_at"]}{handle_str})')
        return info

    def probe_pool(self) -> Tuple['ReplicaInfo', bool, float]:
        """Probe the replica for pool management.

        This function will check the first job status of the cluster, which is a
        dummy job that only echoes "setup done". The success of this job means
        the setup command is done and the replica is ready to be used. Check
        sky/serve/server/core.py::up for more details.

        Returns:
            Tuple of (self, is_ready, probe_time).
        """
        probe_time = time.time()
        try:
            handle = backend_utils.check_cluster_available(
                self.cluster_name, operation='probing pool')
            if handle is None:
                return self, False, probe_time
            backend = backend_utils.get_backend_from_handle(handle)
            # Job id 1 is the dummy setup job described in the docstring.
            statuses = backend.get_job_status(handle, [1], stream_logs=False)
            if statuses[1] == job_lib.JobStatus.SUCCEEDED:
                return self, True, probe_time
            return self, False, probe_time
        except Exception as e:  # pylint: disable=broad-except
            # Best-effort probe: any error counts as "not ready".
            logger.error(f'Error when probing pool of {self.cluster_name}: '
                         f'{common_utils.format_exception(e)}.')
            return self, False, probe_time

    def probe(
        self,
        readiness_path: str,
        post_data: Optional[Dict[str, Any]],
        timeout: int,
        headers: Optional[Dict[str, str]],
    ) -> Tuple['ReplicaInfo', bool, float]:
        """Probe the readiness of the replica.

        Args:
            readiness_path: Path appended to the replica URL, e.g. '/health'.
            post_data: If not None, probe with a POST carrying this JSON body;
                otherwise a plain GET is used.
            timeout: Per-request timeout in seconds (requests semantics).
            headers: Optional HTTP headers sent with the probe.

        Returns:
            Tuple of (self, is_ready, probe_time).
        """
        replica_identity = f'replica {self.replica_id} with url {self.url}'
        # TODO(tian): This requiring the clock on each replica to be aligned,
        # which may not be true when the GCP VMs have run for a long time. We
        # should have a better way to do this. See #2539 for more information.
        probe_time = time.time()
        try:
            msg = ''
            # TODO(tian): Support HTTPS in the future.
            url = self.url
            if url is None:
                logger.info(f'Error when probing {replica_identity}: '
                            'Cannot get the endpoint.')
                return self, False, probe_time
            readiness_path = (f'{url}{readiness_path}')
            logger.info(f'Probing {replica_identity} with {readiness_path}.')
            if post_data is not None:
                msg += 'POST'
                response = requests.post(readiness_path,
                                         json=post_data,
                                         headers=headers,
                                         timeout=timeout)
            else:
                msg += 'GET'
                response = requests.get(readiness_path,
                                        headers=headers,
                                        timeout=timeout)
            msg += (f' request to {replica_identity} returned status '
                    f'code {response.status_code}')
            if response.status_code == 200:
                msg += '.'
                log_method = logger.info
            else:
                # Non-200: include the body and highlight the log line.
                msg += f' and response {response.text}.'
                msg = f'{colorama.Fore.YELLOW}{msg}{colorama.Style.RESET_ALL}'
                log_method = logger.error
            log_method(msg)
            if response.status_code == 200:
                logger.debug(f'{replica_identity.capitalize()} is ready.')
                return self, True, probe_time
        except requests.exceptions.RequestException as e:
            # Connection/timeout errors are treated as "not ready".
            logger.error(
                f'{colorama.Fore.YELLOW}Error when probing {replica_identity}:'
                f' {common_utils.format_exception(e)}.'
                f'{colorama.Style.RESET_ALL}')
        return self, False, probe_time

    def __setstate__(self, state):
        """Set state from pickled state, for backward compatibility."""
        version = state.pop('_version', None)
        # Handle old version(s) here.
        if version is None:
            version = -1

        if version < 0:
            # It will be handled with RequestRateAutoscaler.
            # Treated similar to on-demand instances.
            self.is_spot = False

        if version < 1:
            self.location = None

        if version < 2:
            self.resources_override = None

        # NOTE(review): `_version` is popped above and never written back, so
        # an unpickled object loses its version attribute — confirm whether
        # repickling such an object is expected.
        self.__dict__.update(state)


class ReplicaManager:
    """Each replica manager monitors one service."""

    def __init__(self, service_name: str, spec: 'service_spec.SkyServiceSpec',
                 version: int) -> None:
        # Guards read-modify-write of replica records; see with_lock.
        self.lock = threading.Lock()
        self._next_replica_id: int = 1
        self._service_name: str = service_name
        self._uptime: Optional[float] = None
        self._update_mode = serve_utils.DEFAULT_UPDATE_MODE
        self._is_pool: bool = spec.pool
        header_keys = (list(spec.readiness_headers.keys())
                       if spec.readiness_headers is not None else None)
        logger.info(f'Readiness probe path: {spec.readiness_path}\n'
                    f'Initial delay seconds: {spec.initial_delay_seconds}\n'
                    f'Post data: {spec.post_data}\n'
                    f'Readiness header keys: {header_keys}')

        # Newest version among the currently provisioned and launched replicas
        self.latest_version: int = version
        # Oldest version among the currently provisioned and launched replicas
        self.least_recent_version: int = version

    def _consecutive_failure_threshold_timeout(self) -> int:
        """The timeout for the consecutive failure threshold in seconds.

        Pools use a much shorter timeout (10s) so they react to failures
        quickly; regular services use 180s.
        """
        # TODO(tian): Maybe let user determine this threshold
        return 10 if self._is_pool else 180

    def scale_up(self,
                 resources_override: Optional[Dict[str, Any]] = None) -> None:
        """Scale up the service by 1 replica with resources_override.

        resources_override is of the same format with resources section
        in skypilot task yaml
        """
        raise NotImplementedError

    def scale_down(self, replica_id: int, purge: bool = False) -> None:
        """Scale down replica with replica_id."""
        raise NotImplementedError

    def update_version(self, version: int, spec: 'service_spec.SkyServiceSpec',
                       update_mode: serve_utils.UpdateMode) -> None:
        """Switch the service to a new version; implemented by subclasses."""
        raise NotImplementedError

    def get_active_replica_urls(self) -> List[str]:
        """Get the urls of the active replicas."""
        raise NotImplementedError


class SkyPilotReplicaManager(ReplicaManager):
    """Replica Manager for SkyPilot clusters.

    It will run three daemon to monitor the status of the replicas:
        (1) _thread_pool_refresher: Refresh the launch/down thread pool
            to monitor the progress of the launch/down thread.
        (2) _job_status_fetcher: Fetch the job status of the service to
            monitor the status of the service jobs.
        (3) _replica_prober: Do readiness probe to the replicas to monitor
            whether it is still responding to requests.
    """

    def __init__(self, service_name: str, spec: 'service_spec.SkyServiceSpec',
                 version: int) -> None:
        """Set up state, start the monitor daemons, then recover prior work."""
        super().__init__(service_name, spec, version)
        self.yaml_content = serve_state.get_yaml_content(service_name, version)
        service_task = task_lib.Task.from_yaml_str(self.yaml_content)
        self._spot_placer: Optional[spot_placer.SpotPlacer] = (
            spot_placer.SpotPlacer.from_task(spec, service_task))
        # TODO(tian): Store launch/down request id in the replica table, to
        # make the manager more persistent.
        self._launch_thread_pool: thread_utils.ThreadSafeDict[
            int, thread_utils.SafeThread] = thread_utils.ThreadSafeDict()
        self._replica_to_request_id: thread_utils.ThreadSafeDict[
            int, str] = thread_utils.ThreadSafeDict()
        self._replica_to_launch_cancelled: thread_utils.ThreadSafeDict[
            int, bool] = thread_utils.ThreadSafeDict()
        self._down_thread_pool: thread_utils.ThreadSafeDict[
            int, thread_utils.SafeThread] = thread_utils.ThreadSafeDict()

        # Start the three monitoring daemons before recovering any in-flight
        # operations from a previous controller run.
        for daemon_target in (self._thread_pool_refresher,
                              self._job_status_fetcher, self._replica_prober):
            threading.Thread(target=daemon_target).start()

        self._recover_replica_operations()

    @with_lock
    def _recover_replica_operations(self):
        """Resume launch/terminate work left over from a previous run."""
        assert (not self._launch_thread_pool and not self._down_thread_pool
               ), 'We should not have any running threads in a recovery run'

        # _launch_replica feeds a FIFO queue with capacity _MAX_NUM_LAUNCH.
        # PROVISIONING replicas go first: they were already launched before
        # the interruption and should be restarted before any PENDING ones.
        provisioning = serve_state.get_replicas_at_status(
            self._service_name, serve_state.ReplicaStatus.PROVISIONING)
        pending = serve_state.get_replicas_at_status(
            self._service_name, serve_state.ReplicaStatus.PENDING)
        for replica_info in provisioning + pending:
            # `execution.launch` is expected to be robust against partially
            # finished provisioning, so we simply re-issue the original
            # request, mocked from all call sites (including
            # SkyServeController._run_autoscaler).
            self._launch_replica(
                replica_info.replica_id,
                resources_override=replica_info.resources_override)

        shutting_down = serve_state.get_replicas_at_status(
            self._service_name, serve_state.ReplicaStatus.SHUTTING_DOWN)
        for replica_info in shutting_down:
            self._terminate_replica(
                replica_info.replica_id,
                sync_down_logs=False,
                replica_drain_delay_seconds=0,
                purge=replica_info.status_property.purged,
                is_scale_down=replica_info.status_property.is_scale_down)

    ################################
    # Replica management functions #
    ################################

    # We don't need to add lock here since every caller of this function
    # will acquire the lock.
    def _launch_replica(
        self,
        replica_id: int,
        resources_override: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Queue a launch thread for a new replica.

        The thread is only created here, not started: _refresh_thread_pool
        starts it later so that the number of concurrent sky.launch
        invocations stays bounded.
        """
        if replica_id in self._launch_thread_pool:
            logger.warning(f'Launch thread for replica {replica_id} '
                           'already exists. Skipping.')
            return
        logger.info(f'Launching replica {replica_id}...')
        cluster_name = serve_utils.generate_replica_cluster_name(
            self._service_name, replica_id)
        log_file_name = serve_utils.generate_replica_launch_log_file_name(
            self._service_name, replica_id)
        use_spot = _should_use_spot(self.yaml_content, resources_override)
        retry_until_up = True
        location = None
        if use_spot and self._spot_placer is not None:
            # The spot placer handles availability failures itself, so we do
            # not retry until up: any launch failure due to availability is
            # fed back into the placer.
            retry_until_up = False
            # TODO(tian): Currently, the resources_override can only be
            # `use_spot=True/False`, which will not cause any conflict with
            # spot placer's cloud, region & zone. When we add more resources
            # to the resources_override, we need to make sure they won't
            # conflict with the spot placer's selection.
            if resources_override is None:
                resources_override = {}
            existing_spot_locations: List[spot_placer.Location] = []
            for replica in serve_state.get_replica_infos(self._service_name):
                if not replica.is_spot:
                    continue
                loc = replica.get_spot_location()
                if loc is not None:
                    existing_spot_locations.append(loc)
            location = self._spot_placer.select_next_location(
                existing_spot_locations)
            resources_override.update(location.to_dict())
        launch_thread = thread_utils.SafeThread(
            target=launch_cluster,
            args=(replica_id, self.yaml_content, cluster_name, log_file_name,
                  self._replica_to_request_id,
                  self._replica_to_launch_cancelled, resources_override,
                  retry_until_up),
        )
        replica_port = _get_resources_ports(self.yaml_content)

        info = ReplicaInfo(replica_id, cluster_name, replica_port, use_spot,
                           location, self.latest_version, resources_override)
        serve_state.add_or_update_replica(self._service_name, replica_id, info)
        # Don't start right now; we will start it later in _refresh_thread_pool
        # to avoid too many sky.launch running at the same time.
        self._launch_thread_pool[replica_id] = launch_thread

    @with_lock
    def scale_up(self,
                 resources_override: Optional[Dict[str, Any]] = None) -> None:
        """Add one replica, optionally overriding its task resources."""
        new_replica_id = self._next_replica_id
        self._launch_replica(new_replica_id, resources_override)
        self._next_replica_id = new_replica_id + 1

    def _handle_sky_down_finish(self, info: ReplicaInfo,
                                format_exc: Optional[str]) -> None:
        """Finalize a replica teardown and decide whether to keep its record.

        Args:
            info: The replica whose down thread (or skipped teardown) just
                finished.
            format_exc: Formatted traceback if the down thread failed,
                otherwise None.
        """
        if format_exc is None:
            info.status_property.sky_down_status = (
                common_utils.ProcessStatus.SUCCEEDED)
        else:
            logger.error(f'Down thread for replica {info.replica_id} '
                         f'exited abnormally with exception {format_exc}.')
            info.status_property.sky_down_status = (
                common_utils.ProcessStatus.FAILED)
        # Failed replica still count as a replica. In our current design, we
        # want to fail early if user code have any error. This will prevent
        # infinite loop of teardown and re-provision. However, there is a
        # special case that if the replica is UP for longer than
        # initial_delay_seconds, we assume it is just some random failure and
        # we should restart the replica. Please refer to the implementation of
        # `is_scale_down_succeeded` for more details.
        # TODO(tian): Currently, restart replicas that failed within
        # initial_delay_seconds is not supported. We should add it
        # later when we support `sky serve update`.
        props = info.status_property
        removal_reason: Optional[str] = None
        if props.is_scale_down:
            # The cluster was deleted by an autoscaler decision or is
            # recovering from preemption; drop the record so it no longer
            # counts as a replica.
            removal_reason = ('for preemption recovery'
                              if props.preempted else 'normally')
        elif info.version != self.latest_version:
            # Never keep failed records for outdated versions: the user is
            # expected to fix the error before updating.
            removal_reason = 'for version outdated'
        elif props.purged:
            removal_reason = 'for purge'
        elif props.failed_spot_availability:
            removal_reason = 'for spot availability failure'

        if removal_reason is None:
            logger.info(f'Termination of replica {info.replica_id} '
                        'finished. Replica info is kept since some '
                        'failure detected.')
            serve_state.add_or_update_replica(self._service_name,
                                              info.replica_id, info)
        else:
            serve_state.remove_replica(self._service_name, info.replica_id)
            logger.info(f'Replica {info.replica_id} removed from the '
                        f'replica table {removal_reason}.')

    # We don't need to add lock here since every caller of this function
    # will acquire the lock.
    def _terminate_replica(self,
                           replica_id: int,
                           sync_down_logs: bool,
                           replica_drain_delay_seconds: int,
                           is_scale_down: bool = False,
                           purge: bool = False) -> None:
        """Terminate a replica, optionally syncing down its logs first.

        Interrupts any in-flight launch thread for the replica, then
        schedules a down thread (started later by _refresh_thread_pool).

        Args:
            replica_id: ID of the replica to terminate.
            sync_down_logs: Whether to download launch/job logs before
                terminating. Required to be True when the replica record is
                kept (neither a scale-down nor a purge) so the user can
                debug the failure.
            replica_drain_delay_seconds: Delay passed to terminate_cluster
                before teardown.
            is_scale_down: True if the termination is an autoscaler decision
                or preemption recovery rather than a failure.
            purge: True if the user requested purging the replica record.
        """
        left_in_record = not (is_scale_down or purge)
        if left_in_record:
            assert sync_down_logs, (
                'For the replica left in the record, '
                'the logs should always be synced down. '
                'So that the user can see the logs to debug.')

        # If a launch thread is still registered for this replica, interrupt
        # it before tearing the cluster down.
        if replica_id in self._launch_thread_pool:
            info = serve_state.get_replica_info_from_id(self._service_name,
                                                        replica_id)
            assert info is not None
            info.status_property.sky_launch_status = (
                common_utils.ProcessStatus.INTERRUPTED)
            serve_state.add_or_update_replica(self._service_name, replica_id,
                                              info)
            launch_thread = self._launch_thread_pool[replica_id]
            if launch_thread.is_alive():
                # Signal the launch thread that cancellation is requested,
                # then wait (bounded) for one of the exit conditions below.
                self._replica_to_launch_cancelled[replica_id] = True
                start_wait_time = time.time()
                timeout_reached = False
                while True:
                    # Launch request id found. cancel it.
                    if replica_id in self._replica_to_request_id:
                        request_id = self._replica_to_request_id[replica_id]
                        sdk.api_cancel(request_id)
                        break
                    if replica_id not in self._replica_to_launch_cancelled:
                        # Indicates that the cancellation was received.
                        break
                    if not launch_thread.is_alive():
                        # It's possible that the launch thread immediately
                        # finished after we check. Exit the loop now.
                        break
                    if (time.time() - start_wait_time >
                            _WAIT_LAUNCH_THREAD_TIMEOUT_SECONDS):
                        timeout_reached = True
                        break
                    time.sleep(0.1)
                if timeout_reached:
                    logger.warning(
                        'Failed to cancel launch request for replica '
                        f'{replica_id} after '
                        f'{_WAIT_LAUNCH_THREAD_TIMEOUT_SECONDS} seconds. '
                        'Force waiting the launch thread to finish.')
                else:
                    logger.info('Interrupted launch thread for replica '
                                f'{replica_id} and deleted the cluster.')
                # Block until the launch thread actually exits before
                # proceeding to teardown.
                launch_thread.join()
            else:
                logger.info(f'Launch thread for replica {replica_id} '
                            'already finished. Delete the cluster now.')
            self._launch_thread_pool.pop(replica_id)
            self._replica_to_request_id.pop(replica_id)

        # A down thread already registered means termination is in progress.
        if replica_id in self._down_thread_pool:
            logger.warning(f'Terminate thread for replica {replica_id} '
                           'already exists. Skipping.')
            return

        log_file_name = serve_utils.generate_replica_log_file_name(
            self._service_name, replica_id)

        def _download_and_stream_logs(info: ReplicaInfo) -> None:
            """Merge launch log and synced-down job log into the replica log."""
            launch_log_file_name = (
                serve_utils.generate_replica_launch_log_file_name(
                    self._service_name, replica_id))
            # Write launch log to replica log file
            with open(log_file_name, 'w',
                      encoding='utf-8') as replica_log_file, open(
                          launch_log_file_name, 'r',
                          encoding='utf-8') as launch_file:
                replica_log_file.write(launch_file.read())
            os.remove(launch_log_file_name)

            logger.info(f'Syncing down logs for replica {replica_id}...')
            backend = backends.CloudVmRayBackend()
            handle = global_user_state.get_handle_from_cluster_name(
                info.cluster_name)
            if handle is None:
                logger.error(f'Cannot find cluster {info.cluster_name} for '
                             f'replica {replica_id} in the cluster table. '
                             'Skipping syncing down job logs.')
                return
            assert isinstance(handle, backends.CloudVmRayResourceHandle)
            replica_job_logs_dir = os.path.join(constants.SKY_LOGS_DIRECTORY,
                                                'replica_jobs')
            # For pools the worker job is job '1'; None fetches the latest job.
            job_ids = ['1'] if self._is_pool else None
            job_log_file_name = controller_utils.download_and_stream_job_log(
                backend, handle, replica_job_logs_dir, job_ids)
            if job_log_file_name is not None:
                logger.info(f'\n== End of logs (Replica: {replica_id}) ==')
                with open(log_file_name, 'a',
                          encoding='utf-8') as replica_log_file, open(
                              os.path.expanduser(job_log_file_name),
                              'r',
                              encoding='utf-8') as job_file:
                    replica_log_file.write(job_file.read())
            else:
                # Keep a record of the failed sync in the replica log itself.
                with open(log_file_name, 'a',
                          encoding='utf-8') as replica_log_file:
                    replica_log_file.write(
                        f'Failed to sync down job logs from replica'
                        f' {replica_id}.\n')

        logger.info(f'Terminating replica {replica_id}...')
        info = serve_state.get_replica_info_from_id(self._service_name,
                                                    replica_id)
        assert info is not None

        if sync_down_logs:
            _download_and_stream_logs(info)

        logger.info(f'preempted: {info.status_property.preempted}, '
                    f'replica_id: {replica_id}')
        info.status_property.is_scale_down = is_scale_down
        info.status_property.purged = purge

        # If the cluster does not exist, it means either the cluster never
        # exists (e.g., the cluster is scaled down before it gets a chance to
        # provision) or the cluster is preempted and cleaned up by the status
        # refresh. In this case, we skip spawning a new down thread to save
        # controller resources.
        if not global_user_state.cluster_with_name_exists(info.cluster_name):
            self._handle_sky_down_finish(info, format_exc=None)
            return

        # Otherwise, start the thread to terminate the cluster.
        t = thread_utils.SafeThread(
            target=terminate_cluster,
            args=(info.cluster_name, log_file_name,
                  replica_drain_delay_seconds),
        )
        info.status_property.sky_down_status = (
            common_utils.ProcessStatus.SCHEDULED)
        serve_state.add_or_update_replica(self._service_name, replica_id, info)
        self._down_thread_pool[replica_id] = t

    @with_lock
    def scale_down(self, replica_id: int, purge: bool = False) -> None:
        """Tear down the given replica as a scale-down decision."""
        self._terminate_replica(
            replica_id,
            replica_drain_delay_seconds=_DEFAULT_DRAIN_SECONDS,
            sync_down_logs=False,
            is_scale_down=True,
            purge=purge)

    # We don't need to add lock here since every caller of this function
    # will acquire the lock.
    def _handle_preemption(self, info: ReplicaInfo) -> bool:
        """Check whether a spot replica was preempted and recover if so.

        Returns:
            bool: True iff the replica was detected as preempted (and its
            termination/recovery has been kicked off).
        """
        if not info.is_spot:
            return False

        # Fetch the cluster handle (for zone information) before refreshing:
        # backend_utils.refresh_cluster_status_handle below may delete the
        # cluster record from the cluster table.
        handle = global_user_state.get_handle_from_cluster_name(
            info.cluster_name)
        if handle is None:
            logger.error(f'Cannot find cluster {info.cluster_name} for '
                         f'replica {info.replica_id} in the cluster table. '
                         'Skipping preemption handling.')
            return False
        assert isinstance(handle, backends.CloudVmRayResourceHandle)
        # Query the cloud provider for the actual cluster status to decide
        # whether the cluster was preempted.
        cluster_status, _ = backend_utils.refresh_cluster_status_handle(
            info.cluster_name,
            force_refresh_statuses=set(status_lib.ClusterStatus))

        if cluster_status == status_lib.ClusterStatus.UP:
            return False
        # Anything else means the cluster is (partially) preempted; it can
        # be down, INIT or STOPPED depending on the cloud's interruption
        # behavior.
        status_suffix = ('' if cluster_status is None else
                         f' (status: {cluster_status.value})')
        logger.info(
            f'Replica {info.replica_id} is preempted{status_suffix}.')
        info.status_property.preempted = True
        if self._spot_placer is not None:
            preempted_location = info.get_spot_location()
            assert preempted_location is not None
            self._spot_placer.set_preemptive(preempted_location)
        serve_state.add_or_update_replica(self._service_name, info.replica_id,
                                          info)
        self._terminate_replica(info.replica_id,
                                sync_down_logs=False,
                                replica_drain_delay_seconds=0,
                                is_scale_down=True)
        return True

    #################################
    # ReplicaManager Daemon Threads #
    #################################

    @with_lock
    def _refresh_thread_pool(self) -> None:
        """Refresh the launch/down thread pool.

        This function checks all sky.launch and sky.down threads on the fly:
        scheduled threads are started when controller resources permit, and
        for any thread that finished, the status of the corresponding
        replica is updated. Finally, metadata of service versions that no
        live replica references anymore is cleaned up.
        """
        # Snapshot to avoid `dictionary changed size during iteration` error:
        # entries are popped from the pool inside the loop.
        launch_thread_pool_snapshot = list(self._launch_thread_pool.items())
        for replica_id, t in launch_thread_pool_snapshot:
            if t.is_alive():
                continue
            # NOTE(review): the file lock presumably serializes the
            # can_provision() capacity decision with other controller
            # processes sharing the resources accounting -- confirm.
            with filelock.FileLock(controller_utils.get_resources_lock_path()):
                info = serve_state.get_replica_info_from_id(
                    self._service_name, replica_id)
                assert info is not None, replica_id
                error_in_sky_launch = False
                if info.status == serve_state.ReplicaStatus.PENDING:
                    # sky.launch not started yet; start it now if there is
                    # capacity for another provisioning job.
                    if controller_utils.can_provision(self._is_pool):
                        t.start()
                        info.status_property.sky_launch_status = (
                            common_utils.ProcessStatus.RUNNING)
                else:
                    # sky.launch finished
                    # TODO(tian): Try-catch in thread, and have an enum return
                    # value to indicate which type of failure happened.
                    # Currently we only have user code failure since the
                    # retry_until_up flag is set to True, but it will be helpful
                    # when we enable user choose whether to retry or not.
                    logger.info(
                        f'Launch thread for replica {replica_id} finished.')
                    self._launch_thread_pool.pop(replica_id)
                    self._replica_to_request_id.pop(replica_id)
                    if t.format_exc is not None:
                        logger.warning(
                            f'Launch thread for replica {replica_id} '
                            f'exited abnormally with exception '
                            f'{t.format_exc}. Terminating...')
                        info.status_property.sky_launch_status = (
                            common_utils.ProcessStatus.FAILED)
                        error_in_sky_launch = True
                    else:
                        info.status_property.sky_launch_status = (
                            common_utils.ProcessStatus.SUCCEEDED)
                    if self._spot_placer is not None and info.is_spot:
                        # TODO(tian): Currently, we set the location to
                        # preemptive if the launch thread failed. This is
                        # because if the error is not related to the
                        # availability of the location, then all locations
                        # should failed for same reason. So it does not matter
                        # which location is preemptive or not, instead, all
                        # locations would fail. We should implement a log parser
                        # to detect if the error is actually related to the
                        # availability of the location later.
                        location = info.get_spot_location()
                        assert location is not None
                        if t.format_exc is not None:
                            self._spot_placer.set_preemptive(location)
                            info.status_property.failed_spot_availability = True
                        else:
                            self._spot_placer.set_active(location)
                serve_state.add_or_update_replica(self._service_name,
                                                  replica_id, info)
                if error_in_sky_launch:
                    # Teardown after update replica info since
                    # _terminate_replica will update the replica info too.
                    self._terminate_replica(replica_id,
                                            sync_down_logs=True,
                                            replica_drain_delay_seconds=0)
        # Same snapshot trick: entries are popped inside the loop.
        down_thread_pool_snapshot = list(self._down_thread_pool.items())
        for replica_id, t in down_thread_pool_snapshot:
            if t.is_alive():
                continue
            info = serve_state.get_replica_info_from_id(self._service_name,
                                                        replica_id)
            assert info is not None, replica_id
            if (info.status_property.sky_down_status ==
                    common_utils.ProcessStatus.SCHEDULED):
                # sky.down not started yet; start it now if there is
                # capacity for another termination job.
                if controller_utils.can_terminate(self._is_pool):
                    t.start()
                    info.status_property.sky_down_status = (
                        common_utils.ProcessStatus.RUNNING)
                    serve_state.add_or_update_replica(self._service_name,
                                                      replica_id, info)
            else:
                logger.info(
                    f'Terminate thread for replica {replica_id} finished.')
                self._down_thread_pool.pop(replica_id)
                self._handle_sky_down_finish(info, format_exc=t.format_exc)

        # Clean up metadata of versions older than every live replica.
        replica_infos = serve_state.get_replica_infos(self._service_name)
        current_least_recent_version = min([
            info.version for info in replica_infos
        ]) if replica_infos else self.least_recent_version
        if self.least_recent_version < current_least_recent_version:
            for version in range(self.least_recent_version,
                                 current_least_recent_version):
                yaml_content = serve_utils.get_yaml_content(
                    self._service_name, version)
                # Delete old version metadata.
                serve_state.delete_version(self._service_name, version)
                # Delete storage buckets of older versions.
                service.cleanup_storage(yaml_content)
            # The newest version will be cleaned up in serve down.
            self.least_recent_version = current_least_recent_version

    def _thread_pool_refresher(self) -> None:
        """Daemon loop: periodically refresh the launch/down thread pool."""
        while True:
            logger.debug('Refreshing thread pool.')
            try:
                self._refresh_thread_pool()
            except Exception as exc:  # pylint: disable=broad-except
                # Swallow everything: a dead refresher would stall every
                # pending launch/down thread, so log and keep looping.
                logger.error('Error in thread pool refresher: '
                             f'{common_utils.format_exception(exc)}')
                with ux_utils.enable_traceback():
                    logger.error(f'  Traceback: {traceback.format_exc()}')
            time.sleep(_PROCESS_POOL_REFRESH_INTERVAL)

    @with_lock
    def _fetch_job_status(self) -> None:
        """Fetch the service job status of all replicas.

        This function will monitor the job status of all replicas
        to make sure the service is running correctly. If any of the
        replicas failed, it will terminate the replica.

        It is still needed even if we already keep probing the replicas,
        since the replica job might launch the API server in the background
        (using &), and the readiness probe will not detect the worker failure.
        """
        # We use backend API to avoid usage collection in the
        # sdk.job_status. Both the backend and the job id list are
        # loop-invariant (their construction takes no per-replica
        # arguments), so build them once instead of once per replica.
        backend = backends.CloudVmRayBackend()
        # For pools the worker job is always job 1; use None to fetch the
        # latest job, which stands for the user task job.
        job_ids = [1] if self._is_pool else None
        infos = serve_state.get_replica_infos(self._service_name)
        for info in infos:
            if not info.status_property.should_track_service_status():
                continue
            handle = info.handle()
            assert handle is not None, info
            try:
                job_statuses = backend.get_job_status(handle,
                                                      job_ids,
                                                      stream_logs=False)
            except exceptions.CommandError:
                # If the job status fetch failed, it is likely that the
                # cluster is preempted.
                is_preempted = self._handle_preemption(info)
                if is_preempted:
                    continue
                # Re-raise the exception if it is not preempted.
                raise
            job_status = job_statuses[1] if self._is_pool else list(
                job_statuses.values())[0]
            if job_status in job_lib.JobStatus.user_code_failure_states():
                info.status_property.user_app_failed = True
                serve_state.add_or_update_replica(self._service_name,
                                                  info.replica_id, info)
                logger.warning(
                    f'Service job for replica {info.replica_id} FAILED. '
                    'Terminating...')
                # Sync logs down so the user can debug the failure.
                self._terminate_replica(info.replica_id,
                                        sync_down_logs=True,
                                        replica_drain_delay_seconds=0)

    def _job_status_fetcher(self) -> None:
        """Daemon loop: periodically fetch the job status of all replicas."""
        while True:
            logger.debug('Refreshing job status.')
            try:
                self._fetch_job_status()
            except Exception as exc:  # pylint: disable=broad-except
                # Swallow everything so the fetcher daemon survives any
                # failure; log and keep looping.
                logger.error('Error in job status fetcher: '
                             f'{common_utils.format_exception(exc)}')
                with ux_utils.enable_traceback():
                    logger.error(f'  Traceback: {traceback.format_exc()}')
            time.sleep(_JOB_STATUS_FETCH_INTERVAL)

    @with_lock
    def _probe_all_replicas(self) -> None:
        """Readiness probe replicas.

        This function will probe all replicas to make sure the service is
        ready. It will keep track of:
            (1) the initial delay for each replica;
            (2) the consecutive failure times.
        The replica will be terminated if any of the thresholds exceeded.
        """
        probe_futures = []
        replica_to_probe = []
        with mp_pool.ThreadPool() as pool:
            infos = serve_state.get_replica_infos(self._service_name)
            for info in infos:
                if not info.status_property.should_track_service_status():
                    continue
                if self._is_pool:
                    replica_to_probe.append(f'replica_{info.replica_id}(cluster'
                                            f'_name={info.cluster_name})')
                    probe_futures.append(pool.apply_async(info.probe_pool))
                else:
                    replica_to_probe.append(
                        f'replica_{info.replica_id}(url={info.url})')
                    probe_futures.append(
                        pool.apply_async(
                            info.probe,
                            (
                                self._get_readiness_path(info.version),
                                self._get_post_data(info.version),
                                self._get_readiness_timeout_seconds(
                                    info.version),
                                self._get_readiness_headers(info.version),
                            ),
                        ),)
            logger.info(f'Replicas to probe: {", ".join(replica_to_probe)}')

            # Since futures.as_completed will return futures in the order of
            # completion, we need the info.probe function to return the info
            # object as well, so that we could update the info object in the
            # same order.
            for future in probe_futures:
                future_result: Tuple[ReplicaInfo, bool, float] = future.get()
                info, probe_succeeded, probe_time = future_result
                info.status_property.service_ready_now = probe_succeeded
                should_teardown = False
                if probe_succeeded:
                    if self._uptime is None:
                        self._uptime = probe_time
                        logger.info(
                            f'Replica {info.replica_id} is the first ready '
                            f'replica. Setting uptime to {self._uptime}.')
                        serve_state.set_service_uptime(self._service_name,
                                                       int(self._uptime))
                    info.consecutive_failure_times.clear()
                    if info.status_property.first_ready_time is None:
                        info.status_property.first_ready_time = probe_time
                else:
                    # TODO(tian): This might take a lot of time. Shouldn't
                    # blocking probe to other replicas.
                    is_preempted = self._handle_preemption(info)
                    if is_preempted:
                        continue

                    if info.first_not_ready_time is None:
                        info.first_not_ready_time = probe_time
                    if info.status_property.first_ready_time is not None:
                        info.consecutive_failure_times.append(probe_time)
                        consecutive_failure_time = (
                            info.consecutive_failure_times[-1] -
                            info.consecutive_failure_times[0])
                        failure_threshold = (
                            self._consecutive_failure_threshold_timeout())
                        if consecutive_failure_time >= failure_threshold:
                            logger.info(
                                f'Replica {info.replica_id} is not ready for '
                                'too long and exceeding consecutive failure '
                                'threshold. Terminating the replica...')
                            should_teardown = True
                        else:
                            logger.info(
                                f'Replica {info.replica_id} is not ready '
                                'but within consecutive failure threshold '
                                f'({consecutive_failure_time}s / '
                                f'{failure_threshold}s). Skipping.')
                    else:
                        initial_delay_seconds = self._get_initial_delay_seconds(
                            info.version)
                        current_delay_seconds = (probe_time -
                                                 info.first_not_ready_time)
                        if current_delay_seconds > initial_delay_seconds:
                            logger.info(
                                f'Replica {info.replica_id} is not ready and '
                                'exceeding initial delay seconds. Terminating '
                                'the replica...')
                            should_teardown = True
                            info.status_property.first_ready_time = -1.0
                        else:
                            current_delay_seconds = int(current_delay_seconds)
                            logger.info(f'Replica {info.replica_id} is not '
                                        'ready but within initial delay '
                                        f'seconds ({current_delay_seconds}s '
                                        f'/ {initial_delay_seconds}s). '
                                        'Skipping.')
                serve_state.add_or_update_replica(self._service_name,
                                                  info.replica_id, info)
                if should_teardown:
                    self._terminate_replica(info.replica_id,
                                            sync_down_logs=True,
                                            replica_drain_delay_seconds=0)

    def _replica_prober(self) -> None:
        """Run the replica probing loop forever.

        Each iteration probes every tracked replica, then recomputes the
        service status and active versions from the refreshed replica
        records. Any exception is logged and swallowed so that a single
        failed iteration never kills the prober.
        """

        def _probe_iteration() -> None:
            # Probe first, then derive service-level state from the
            # just-updated replica records.
            self._probe_all_replicas()
            infos = serve_state.get_replica_infos(self._service_name)
            # TODO(zhwu): when there are multiple load balancers, we need
            # to make sure the active_versions are the union of all
            # versions of all load balancers.
            serve_utils.set_service_status_and_active_versions_from_replica(
                self._service_name, infos, self._update_mode)

        while True:
            logger.debug('Running replica prober.')
            try:
                _probe_iteration()
            except Exception as exc:  # pylint: disable=broad-except
                # No matter what error happens, we should keep the
                # replica prober running.
                logger.error('Error in replica prober: '
                             f'{common_utils.format_exception(exc)}')
                with ux_utils.enable_traceback():
                    logger.error(f'  Traceback: {traceback.format_exc()}')
            # TODO(MaoZiming): Probe cloud for early preemption warning.
            time.sleep(serve_constants.ENDPOINT_PROBE_INTERVAL_SECONDS)

    def get_active_replica_urls(self) -> List[str]:
        """Return the URLs of all READY replicas on an active version.

        A replica qualifies only if its status is READY and its version is
        one of the service record's active versions.
        """
        record = serve_state.get_service_from_name(self._service_name)
        assert record is not None, (f'{self._service_name} not found on '
                                    'controller records.')
        active_versions = set(record['active_versions'])
        urls: List[str] = []
        for info in serve_state.get_replica_infos(self._service_name):
            # Guard clauses: skip anything not READY or on an old version.
            if info.status != serve_state.ReplicaStatus.READY:
                continue
            if info.version not in active_versions:
                continue
            # A READY replica must have a URL.
            assert info.url is not None, info
            urls.append(info.url)
        return urls

    ###########################################
    # SkyServe Update and replica versioning. #
    ###########################################

    def update_version(self, version: int, spec: 'service_spec.SkyServiceSpec',
                       update_mode: serve_utils.UpdateMode) -> None:
        """Switch the manager to a new service version.

        Records the new version/yaml/update-mode on the manager, then tries
        to fast-path the update: any non-terminal replica whose config
        (ignoring the `service`, `pool` and `_user_specified_yaml` fields)
        matches the new version's config is bumped to the new version in
        place instead of being torn down and relaunched.

        Args:
            version: The new version number; must be strictly greater than
                the current `self.latest_version`, otherwise the call is
                rejected with an error log.
            spec: The new service spec.
                NOTE(review): `spec` is not read anywhere in this method
                body — confirm whether it is kept for interface
                compatibility.
            update_mode: How the update should be rolled out; stored on the
                manager for later use.
        """
        if version <= self.latest_version:
            logger.error(f'Invalid version: {version}, '
                         f'latest version: {self.latest_version}')
            return
        yaml_content = serve_state.get_yaml_content(self._service_name, version)
        # Commit the new version state before deciding which replicas can be
        # reused.
        self.latest_version = version
        self.yaml_content = yaml_content
        self._update_mode = update_mode

        # Reuse all replicas that have the same config as the new version
        # (except for the `service` field) by directly setting the version to be
        # the latest version. This can significantly improve the speed
        # for updating an existing service with only config changes to the
        # service specs, e.g. scale down the service.
        new_config = yaml_utils.safe_load(yaml_content)
        # Always create new replicas and scale down old ones when file_mounts
        # are not empty.
        if new_config.get('file_mounts', None) != {}:
            return
        # Strip fields that must not participate in the config comparison.
        for key in ['service', 'pool', '_user_specified_yaml']:
            new_config.pop(key, None)
        # NOTE: pop() mutates new_config once, up front; `any_of` is compared
        # separately (order-insensitively) below.
        new_config_any_of = new_config.get('resources', {}).pop('any_of', [])

        replica_infos = serve_state.get_replica_infos(self._service_name)
        for info in replica_infos:
            if info.version < version and not info.is_terminal:
                # Assume user does not change the yaml file on the controller.
                old_yaml_content = serve_state.get_yaml_content(
                    self._service_name, info.version)
                old_config = yaml_utils.safe_load(old_yaml_content)
                for key in ['service', 'pool', '_user_specified_yaml']:
                    old_config.pop(key, None)
                # Bump replica version if all fields except for service are
                # the same.
                # Here, we manually convert the any_of field to a set to avoid
                # only the difference in the random order of the any_of fields.
                old_config_any_of = old_config.get('resources',
                                                   {}).pop('any_of', [])

                if (resources_utils.normalize_any_of_resources_config(
                        old_config_any_of) != resources_utils.
                        normalize_any_of_resources_config(new_config_any_of)):
                    logger.info('Replica config changed (any_of), skipping. '
                                f'old: {old_config_any_of}, '
                                f'new: {new_config_any_of}')
                    continue
                # File mounts should both be empty, as update always
                # create new buckets if they are not empty.
                if (old_config == new_config and
                        old_config.get('file_mounts', None) == {}):
                    logger.info(
                        f'Updating replica {info.replica_id} to version '
                        f'{version}. Replica {info.replica_id}\'s config '
                        f'{old_config} is the same as '
                        f'latest version\'s {new_config}.')
                    info.version = version
                    serve_state.add_or_update_replica(self._service_name,
                                                      info.replica_id, info)
                else:
                    logger.info('Replica config changed (rest), skipping. '
                                f'old: {old_config}, '
                                f'new: {new_config}')

    def _get_version_spec(self, version: int) -> 'service_spec.SkyServiceSpec':
        """Look up the service spec for `version`, raising if it is absent."""
        spec = serve_state.get_spec(self._service_name, version)
        if spec is not None:
            return spec
        raise ValueError(f'Version {version} not found.')

    def _get_readiness_path(self, version: int) -> str:
        """Readiness probe path configured for the given version's spec."""
        version_spec = self._get_version_spec(version)
        return version_spec.readiness_path

    def _get_post_data(self, version: int) -> Optional[Dict[str, Any]]:
        """POST payload for readiness probes of the given version, if any."""
        version_spec = self._get_version_spec(version)
        return version_spec.post_data

    def _get_readiness_headers(self, version: int) -> Optional[Dict[str, str]]:
        """HTTP headers for readiness probes of the given version, if any."""
        version_spec = self._get_version_spec(version)
        return version_spec.readiness_headers

    def _get_initial_delay_seconds(self, version: int) -> int:
        """Grace period (seconds) before an unready replica is torn down."""
        version_spec = self._get_version_spec(version)
        return version_spec.initial_delay_seconds

    def _get_readiness_timeout_seconds(self, version: int) -> int:
        """Timeout (seconds) for a single readiness probe of this version."""
        version_spec = self._get_version_spec(version)
        return version_spec.readiness_timeout_seconds
