"""Internal server daemons that run in the background."""
import atexit
import dataclasses
import os
import time
import typing
from typing import Callable

from sky import sky_logging
from sky import skypilot_config
from sky.adaptors import common as adaptors_common
from sky.server import constants as server_constants
from sky.server.requests import request_names
from sky.skylet import constants
from sky.utils import annotations
from sky.utils import common_utils
from sky.utils import env_options
from sky.utils import locks
from sky.utils import subprocess_utils
from sky.utils import timeline
from sky.utils import ux_utils

if typing.TYPE_CHECKING:
    import pathlib
else:
    pathlib = adaptors_common.LazyImport('pathlib')

logger = sky_logging.init_logger(__name__)


def _default_should_skip():
    return False


@dataclasses.dataclass
class InternalRequestDaemon:
    """Internal daemon that runs an event in the background.

    Each daemon runs in its own process; ``run_event`` loops forever,
    invoking ``event_fn`` once per iteration and restarting after a delay
    on failure.
    """

    # Unique identifier; also the config key under
    # ('daemons', <id>, 'log_level').
    id: str
    # Request name used for the daemon's internal request bookkeeping.
    name: request_names.RequestName
    # Body of one daemon iteration; called repeatedly by run_event().
    event_fn: Callable[[], None]
    # Log level used when none is configured for this daemon.
    default_log_level: str = 'INFO'
    # Predicate deciding whether this daemon should not be started.
    should_skip: Callable[[], bool] = _default_should_skip

    def refresh_log_level(self) -> int:
        """Reload the config and return the numeric log level for this daemon.

        Falls back to ``logging.DEBUG`` if the configured level is invalid
        or the config refresh fails.
        """
        # pylint: disable=import-outside-toplevel
        import logging

        # Pre-assign so the AttributeError handler below can safely report
        # the level even when the exception is raised before the configured
        # value is read (otherwise the f-string would hit an unbound local).
        level_str = self.default_log_level
        try:
            # Refresh config within the while loop.
            # Since this is a long running daemon,
            # reload_for_new_request()
            # is not called in between the event runs.
            # We don't need to grab the lock here because each of the daemons
            # run in their own process and thus have their own request context.
            skypilot_config.reload_config()
            # Get the configured log level for the daemon inside the event loop
            # in case the log level changes after the API server is started.
            level_str = skypilot_config.get_nested(
                ('daemons', self.id, 'log_level'), self.default_log_level)
            return getattr(logging, level_str.upper())
        except AttributeError:
            # Bad level should be rejected by
            # schema validation, just in case.
            logger.warning(f'Invalid log level: {level_str}, using DEBUG')
            return logging.DEBUG
        except Exception as e:  # pylint: disable=broad-except
            logger.exception(f'Error refreshing log level for {self.id}: {e}')
            return logging.DEBUG

    def run_event(self):
        """Run the event in an infinite loop, restarting on failure."""

        # Disable logging for periodic refresh to avoid the usage message being
        # sent multiple times.
        os.environ[env_options.Options.DISABLE_LOGGING.env_key] = '1'

        level = self.refresh_log_level()
        while True:
            try:
                with ux_utils.enable_traceback(), \
                    sky_logging.set_sky_logging_levels(level):
                    sky_logging.reload_logger()
                    # Re-read the level each iteration so config changes take
                    # effect without restarting the API server.
                    level = self.refresh_log_level()
                    self.event_fn()
            except Exception:  # pylint: disable=broad-except
                # It is OK to fail to run the event, as the event is not
                # critical, but we should log the error.
                logger.exception(
                    f'Error running {self.name} event. '
                    f'Restarting in '
                    f'{server_constants.DAEMON_RESTART_INTERVAL_SECONDS} '
                    'seconds...')
                time.sleep(server_constants.DAEMON_RESTART_INTERVAL_SECONDS)
            finally:
                # Clear request level cache after each run to avoid
                # using too much memory.
                annotations.clear_request_level_cache()
                timeline.save_timeline()
                # Kill all children processes related to this request.
                # Each executor handles a single request, so we can safely
                # kill all children processes related to this request.
                subprocess_utils.kill_children_processes()
                common_utils.release_memory()


def refresh_cluster_status_event():
    """Periodically refresh the cluster status."""
    # pylint: disable=import-outside-toplevel
    from sky.backends import backend_utils

    logger.info('=== Refreshing cluster status ===')
    # The refresh holds the per-cluster lock, but that is fine: concurrent
    # operations simply wait for the lock and then reuse the freshly
    # refreshed status instead of refreshing again.
    backend_utils.refresh_cluster_records()
    interval = server_constants.CLUSTER_REFRESH_DAEMON_INTERVAL_SECONDS
    logger.info(f'Status refreshed. Sleeping {interval}'
                ' seconds for the next refresh...\n')
    time.sleep(interval)


def refresh_volume_status_event():
    """Periodically refresh the volume status."""
    # pylint: disable=import-outside-toplevel
    from sky.volumes.server import core

    # Disable logging for periodic refresh to avoid the usage message being
    # sent multiple times.
    os.environ[env_options.Options.DISABLE_LOGGING.env_key] = '1'

    logger.info('=== Refreshing volume status ===')
    core.volume_refresh()
    interval = server_constants.VOLUME_REFRESH_DAEMON_INTERVAL_SECONDS
    logger.info(f'Volume status refreshed. Sleeping {interval}'
                ' seconds for the next refresh...\n')
    time.sleep(interval)


_managed_job_consolidation_mode_lock = None


# Attempt to gracefully release the lock when the process exits.
# If this fails, it's okay, the lock will be released when the process dies.
def _release_managed_job_consolidation_mode_lock() -> None:
    global _managed_job_consolidation_mode_lock
    if _managed_job_consolidation_mode_lock is not None:
        _managed_job_consolidation_mode_lock.release()
        _managed_job_consolidation_mode_lock = None


atexit.register(_release_managed_job_consolidation_mode_lock)


def managed_job_status_refresh_event():
    """Refresh the managed job status for controller consolidation mode.

    Acquires the consolidation mode lock (once per process), runs HA
    recovery under the restarting signal file, then runs one round of the
    managed job event and sleeps.
    """
    # pylint: disable=import-outside-toplevel
    from sky.jobs import constants as managed_job_constants
    from sky.jobs import utils as managed_job_utils

    global _managed_job_consolidation_mode_lock
    if _managed_job_consolidation_mode_lock is None:
        _managed_job_consolidation_mode_lock = locks.get_lock(
            managed_job_constants.CONSOLIDATION_MODE_LOCK_ID)

    # Touch the signal file here to avoid conflict with
    # update_managed_jobs_statuses. Although we run
    # ha_recovery_for_consolidation_mode before checking the job statuses
    # (events.ManagedJobEvent), update_managed_jobs_statuses is also called in
    # cancel_jobs_by_id.
    # We also need to make sure that new controllers are not started until we
    # acquire the consolidation mode lock, since if we have controllers on both
    # the new and old API server during a rolling update, calling
    # update_managed_jobs_statuses on the old API server could lead to
    # FAILED_CONTROLLER.
    signal_file = pathlib.Path(
        constants.PERSISTENT_RUN_RESTARTING_SIGNAL_FILE).expanduser()
    # Touch outside the try block: if touch() itself fails there is nothing
    # to clean up, and running unlink() in that case would raise
    # FileNotFoundError and mask the original error.
    signal_file.touch()
    try:
        # Make sure the lock is acquired for this process before proceeding to
        # do recovery. This will block if another API server is still running,
        # but should proceed once it is terminated and releases the lock.
        if not _managed_job_consolidation_mode_lock.is_locked():
            logger.info('Acquiring the consolidation mode lock: '
                        f'{_managed_job_consolidation_mode_lock}')
            _managed_job_consolidation_mode_lock.acquire()
            logger.info('Lock acquired!')
        # We don't explicitly release the lock until the process exits.
        # Even if _release_managed_job_consolidation_mode_lock is not called,
        # the lock should be released when the process dies (either due to the
        # advisory file lock being released or the postgres session dying).

        # We run the recovery logic before checking the job statuses as those
        # two are conflicting. Check PERSISTENT_RUN_RESTARTING_SIGNAL_FILE for
        # details.
        managed_job_utils.ha_recovery_for_consolidation_mode()
    finally:
        # Now, we should be sure that this is the only API server, we have
        # started the new controllers and unclaimed all the jobs, and we are
        # ready to update the job statuses.
        signal_file.unlink()

    # After recovery, we start the event loop.
    from sky.skylet import events
    refresh_event = events.ManagedJobEvent()
    logger.info('=== Running managed job event ===')
    refresh_event.run()
    time.sleep(events.EVENT_CHECKING_INTERVAL_SECONDS)


def should_skip_managed_job_status_refresh():
    """Check if the managed job status refresh event should be skipped."""
    # pylint: disable=import-outside-toplevel
    from sky.jobs import utils as managed_job_utils

    # The daemon is only needed when consolidation mode is enabled.
    consolidation_enabled = managed_job_utils.is_consolidation_mode()
    return not consolidation_enabled


def _serve_status_refresh_event(pool: bool):
    """Refresh the sky serve status for controller consolidation mode."""
    # pylint: disable=import-outside-toplevel
    from sky.serve import serve_utils

    # Recovery must complete before the event loop starts, since the two
    # conflict. Check PERSISTENT_RUN_RESTARTING_SIGNAL_FILE for details.
    serve_utils.ha_recovery_for_consolidation_mode(pool=pool)

    # After recovery, we start the event loop.
    from sky.skylet import events
    event = events.ServiceUpdateEvent(pool=pool)
    if pool:
        noun = 'pool'
    else:
        noun = 'serve'
    logger.info(f'=== Running {noun} status refresh event ===')
    event.run()
    time.sleep(events.EVENT_CHECKING_INTERVAL_SECONDS)


def _should_skip_serve_status_refresh_event(pool: bool):
    """Check if the serve status refresh event should be skipped."""
    # pylint: disable=import-outside-toplevel
    from sky.serve import serve_utils

    # The daemon is only needed when consolidation mode is enabled.
    consolidation_enabled = serve_utils.is_consolidation_mode(pool=pool)
    return not consolidation_enabled


def sky_serve_status_refresh_event():
    """Run one round of the sky serve (non-pool) status refresh."""
    _serve_status_refresh_event(pool=False)


def should_skip_sky_serve_status_refresh():
    """Check whether the sky serve status refresh should be skipped."""
    return _should_skip_serve_status_refresh_event(pool=False)


def pool_status_refresh_event():
    """Run one round of the pool status refresh."""
    _serve_status_refresh_event(pool=True)


def should_skip_pool_status_refresh():
    """Check whether the pool status refresh should be skipped."""
    return _should_skip_serve_status_refresh_event(pool=True)


# Register the events to run in the background.
INTERNAL_REQUEST_DAEMONS = [
    # This status refresh daemon can cause the autostopp'ed/autodown'ed cluster
    # set to updated status automatically, without showing users the hint of
    # cluster being stopped or down when `sky status -r` is called.
    InternalRequestDaemon(
        id='skypilot-status-refresh-daemon',
        name=request_names.RequestName.REQUEST_DAEMON_STATUS_REFRESH,
        event_fn=refresh_cluster_status_event,
        default_log_level='DEBUG'),
    # Volume status refresh daemon to update the volume status periodically.
    InternalRequestDaemon(
        id='skypilot-volume-status-refresh-daemon',
        name=request_names.RequestName.REQUEST_DAEMON_VOLUME_REFRESH,
        event_fn=refresh_volume_status_event),
    # Managed job controller daemon; only runs in consolidation mode.
    InternalRequestDaemon(
        id='managed-job-status-refresh-daemon',
        name=request_names.RequestName.
        REQUEST_DAEMON_MANAGED_JOB_STATUS_REFRESH,
        event_fn=managed_job_status_refresh_event,
        should_skip=should_skip_managed_job_status_refresh),
    # Sky serve controller daemon; only runs in consolidation mode.
    InternalRequestDaemon(
        id='sky-serve-status-refresh-daemon',
        name=request_names.RequestName.REQUEST_DAEMON_SKY_SERVE_STATUS_REFRESH,
        event_fn=sky_serve_status_refresh_event,
        should_skip=should_skip_sky_serve_status_refresh),
    # Pool controller daemon; only runs in consolidation mode.
    InternalRequestDaemon(
        id='pool-status-refresh-daemon',
        name=request_names.RequestName.REQUEST_DAEMON_POOL_STATUS_REFRESH,
        event_fn=pool_status_refresh_event,
        should_skip=should_skip_pool_status_refresh),
]


def is_daemon_request_id(request_id: str) -> bool:
    """Returns whether a specific request_id is an internal daemon."""
    # Generator expression instead of a list: short-circuits on the first
    # match and avoids materializing a throwaway list.
    return any(d.id == request_id for d in INTERNAL_REQUEST_DAEMONS)
