import asyncio
from dataclasses import dataclass
from pathlib import Path
from typing import (
    Any,
    AsyncIterable,
    AsyncIterator,
    Dict,
    Iterable,
    List,
    Set,
)

from tensorlake.function_executor.proto.message_validator import MessageValidator

from indexify.proto.executor_api_pb2 import (
    Allocation,
    AllocationOutcomeCode,
    DesiredExecutorState,
    FunctionCallResult,
    FunctionCallWatch,
    FunctionExecutorDescription,
    GetDesiredExecutorStatesRequest,
)
from indexify.proto.executor_api_pb2_grpc import ExecutorAPIStub

from .blob_store.blob_store import BLOBStore
from .channel_manager import ChannelManager
from .function_executor.server.function_executor_server_factory import (
    FunctionExecutorServerFactory,
)
from .function_executor_controller import (
    FunctionExecutorController,
    allocation_logger,
    function_executor_logger,
    validate_allocation,
    validate_function_executor_description,
)
from .function_executor_controller.function_call_watch_dispatcher import (
    FunctionCallWatchDispatcher,
)
from .metrics.state_reconciler import (
    metric_desired_state_stream_errors,
    metric_desired_state_streams,
    metric_state_reconciliation_errors,
    metric_state_reconciliation_latency,
    metric_state_reconciliations,
)
from .state_reporter import ExecutorStateReporter

# Intervals between recreating the desired state stream on errors.
# Do quick reconnect to not elongate allocation runs for seconds unnecessarily.
# Exponential backoff: start tiny, multiply by the multiplier, cap at the max.
_DESIRED_STATES_STREAM_MIN_BACKOFF_SEC = 0.005  # 5 ms
_DESIRED_STATES_STREAM_MAX_BACKOFF_SEC = 5
_DESIRED_STATES_STREAM_BACKOFF_MULTIPLIER = 10  # 5 ms, 50 ms, 0.5 sec, 5 sec
# Max retries to reconcile the desired state before giving up.
_RECONCILIATION_RETRIES = 3
# If we didn't get a new desired state from the stream within this timeout then the stream might
# not be healthy due to network disruption. In this case we need to recreate the stream to make
# sure that Server really doesn't want to send us a new state.
_DESIRED_EXECUTOR_STATES_TIMEOUT_SEC = 5 * 60  # 5 minutes


@dataclass
class _FunctionCallWatchInfo:
    """Bookkeeping for a single watched function call.

    Groups every result queue subscribed to the same function call (same
    content-derived key) so a terminal FunctionCallResult can be fanned out
    to all subscribers at once.
    """

    # The watch request as registered via add_function_call_watch.
    watch: FunctionCallWatch
    # Subscriber queues; results are delivered with put_nowait and each queue
    # is expected to hold at most one result (see _reconcile_function_call_results).
    result_queues: list[asyncio.Queue]


def _function_call_watch_key(
    namespace: str, request_id: str, function_call_id: str
) -> str:
    # Allows to group watches for the same function call id into the same group.
    return f"{namespace}.{request_id}.{function_call_id}"


# Server sends undefined outcomes to us if watched function is still running.
# We need to filter those out as AllocationRunner only requires a single terminal outcome.
# Only SUCCESS and FAILURE mark a function call as finished.
_terminal_allocation_outcome_codes: list[AllocationOutcomeCode] = [
    AllocationOutcomeCode.ALLOCATION_OUTCOME_CODE_SUCCESS,
    AllocationOutcomeCode.ALLOCATION_OUTCOME_CODE_FAILURE,
]


class ExecutorStateReconciler:
    """Keeps local Executor state in sync with the desired state streamed from Server.

    Runs two aio tasks: a reader that consumes the desired states gRPC stream and
    a reconciliation loop that applies the latest received desired state to local
    Function Executor controllers, allocations and function call result watchers.
    """

    def __init__(
        self,
        executor_id: str,
        function_executor_server_factory: FunctionExecutorServerFactory,
        cache_path: Path,
        blob_store: BLOBStore,
        channel_manager: ChannelManager,
        state_reporter: ExecutorStateReporter,
        logger: Any,
    ):
        self._executor_id: str = executor_id
        self._function_executor_server_factory: FunctionExecutorServerFactory = (
            function_executor_server_factory
        )
        self._cache_path: Path = cache_path
        self._blob_store: BLOBStore = blob_store
        self._channel_manager: ChannelManager = channel_manager
        self._state_reporter: ExecutorStateReporter = state_reporter
        self._function_call_watch_dispatcher: FunctionCallWatchDispatcher = (
            FunctionCallWatchDispatcher(self)
        )
        self._logger: Any = logger.bind(module=__name__)

        # Mutable state. Doesn't need lock because we access from async tasks running in the same thread.
        self._desired_states_reader: asyncio.Task | None = None
        self._reconciliation_loop_runner: asyncio.Task | None = None
        self._function_executor_controllers: Dict[str, FunctionExecutorController] = {}
        self._shutting_down_fe_ids: Set[str] = set()
        # Last clock value seen on the stream; forwarded to the state reporter
        # from the reconciliation loop so Server knows which message we processed.
        self._last_server_clock: int | None = None
        # Content derived FunctionCallWatch key -> _FunctionCallWatchInfo
        self._function_call_watchers: Dict[str, _FunctionCallWatchInfo] = {}

        self._last_desired_state_lock = asyncio.Lock()
        self._last_desired_state_change_notifier: asyncio.Condition = asyncio.Condition(
            lock=self._last_desired_state_lock
        )
        self._last_desired_state: DesiredExecutorState | None = None

    def get_desired_state(self) -> DesiredExecutorState | None:
        """Returns the most recently received desired state, or None if none arrived yet."""
        return self._last_desired_state

    def run(self):
        """Runs the state reconciler.

        Never raises any exceptions. Doesn't block.
        """
        if self._reconciliation_loop_runner is not None:
            self._logger.error(
                "reconciliation loop aio task is already running, skipping run call"
            )
            return

        self._reconciliation_loop_runner = asyncio.create_task(
            self._reconciliation_loop(),
            name="state reconciler reconciliation loop",
        )
        self._desired_states_reader = asyncio.create_task(
            self._desired_states_reader_loop(),
            name="state reconciler desired states stream reader",
        )

    async def shutdown(self):
        """Shuts down the state reconciler.

        Never raises any exceptions.
        """
        if self._reconciliation_loop_runner is not None:
            self._reconciliation_loop_runner.cancel()
            try:
                await self._reconciliation_loop_runner
            except asyncio.CancelledError:
                # Expected cancellation, nothing to do.
                pass
            self._logger.info("reconciliation loop is shutdown")

        if self._desired_states_reader is not None:
            self._desired_states_reader.cancel()
            try:
                await self._desired_states_reader
            except asyncio.CancelledError:
                # Expected cancellation, nothing to do.
                pass
            self._logger.info("desired states stream reader loop is shutdown")

        # Now all the aio tasks exited so nothing will intervene with our actions from this point.
        fe_shutdown_aio_tasks: List[asyncio.Task] = []
        for fe_controller in self._function_executor_controllers.values():
            fe_shutdown_aio_tasks.append(
                asyncio.create_task(
                    fe_controller.shutdown(),
                    name=f"Shutdown Function Executor {fe_controller.function_executor_id()}",
                )
            )

        # The tasks already run concurrently (create_task started them); this
        # loop only waits for all of them to complete.
        for aio_task in fe_shutdown_aio_tasks:
            await aio_task

        self._function_executor_controllers.clear()
        self._logger.info("state reconciler is shutdown")

    def add_function_call_watch(
        self, watch: FunctionCallWatch, result_queue: asyncio.Queue
    ) -> None:
        """Adds a function call watcher.

        Multiple watchers for the same function call share one watch entry;
        each registered queue gets the terminal result delivered.
        Doesn't raise any exceptions.
        """
        content_derived_key: str = _function_call_watch_key(
            namespace=watch.namespace,
            request_id=watch.request_id,
            function_call_id=watch.function_call_id,
        )
        if content_derived_key not in self._function_call_watchers:
            self._function_call_watchers[content_derived_key] = _FunctionCallWatchInfo(
                watch=watch,
                result_queues=[],
            )
        self._function_call_watchers[content_derived_key].result_queues.append(
            result_queue
        )

    def remove_function_call_watch(
        self, watch: FunctionCallWatch, result_queue: asyncio.Queue
    ) -> None:
        """Removes a function call watcher.

        Doesn't raise any exceptions. Logs a warning on failure.
        """
        content_derived_key: str = _function_call_watch_key(
            namespace=watch.namespace,
            request_id=watch.request_id,
            function_call_id=watch.function_call_id,
        )
        watch_info: _FunctionCallWatchInfo | None = self._function_call_watchers.get(
            content_derived_key
        )
        if watch_info is None:
            self._logger.warning(
                "attempted to remove non-existing function call watcher",
                watch=str(watch),
            )
            return

        try:
            watch_info.result_queues.remove(result_queue)
        except ValueError:
            self._logger.warning(
                "attempted to remove non-registered result queue from function call watcher",
                watch=str(watch),
            )
            return

        if len(watch_info.result_queues) == 0:
            # No more result queues, remove the watcher completely.
            self._function_call_watchers.pop(content_derived_key, None)

    async def _desired_states_reader_loop(self):
        """Reads the desired states stream from Server and processes it.

        Never raises any exceptions. Gets cancelled via aio task cancellation.
        """
        backoff_interval_sec: float = _DESIRED_STATES_STREAM_MIN_BACKOFF_SEC

        while True:
            desired_states_stream: AsyncIterable[DesiredExecutorState] | None = None
            try:
                stub = ExecutorAPIStub(await self._channel_manager.get_shared_channel())
                # Report state once before starting the stream so Server
                # doesn't use stale state it knew about this Executor in the past.
                await self._state_reporter.report_state_and_wait_for_completion()

                desired_states_stream = stub.get_desired_executor_states(
                    GetDesiredExecutorStatesRequest(executor_id=self._executor_id)
                )

                metric_desired_state_streams.inc()
                self._logger.info("created new desired states stream")
                await self._process_desired_states_stream(desired_states_stream)
                # The stream processed messages successfully, reset the backoff.
                backoff_interval_sec = _DESIRED_STATES_STREAM_MIN_BACKOFF_SEC
            except Exception as e:
                backoff_interval_sec = min(
                    backoff_interval_sec * _DESIRED_STATES_STREAM_BACKOFF_MULTIPLIER,
                    _DESIRED_STATES_STREAM_MAX_BACKOFF_SEC,
                )
                metric_desired_state_stream_errors.inc()
                self._logger.error(
                    "error while processing desired states stream",
                    exc_info=e,
                )
            finally:
                # Cleanly signal Server that the stream is closed by client.
                # See https://stackoverflow.com/questions/72207914/how-to-stop-listening-on-a-stream-in-python-grpc-client
                if desired_states_stream is not None:
                    desired_states_stream.cancel()

            self._logger.info(
                f"desired states stream closed, reconnecting in {backoff_interval_sec} sec"
            )
            await asyncio.sleep(backoff_interval_sec)

    async def _process_desired_states_stream(
        self, desired_states: AsyncIterable[DesiredExecutorState]
    ):
        """Processes the desired states stream from Server.

        Returns normally only on the idle-stream timeout (so the caller resets
        the backoff interval and recreates the stream); raises on any other
        failure, including normal stream end (StopAsyncIteration).
        """
        desired_states_iter: AsyncIterator[DesiredExecutorState] = aiter(desired_states)
        while True:
            try:
                # Raises StopAsyncIteration when the stream ends.
                new_state: DesiredExecutorState = await asyncio.wait_for(
                    anext(desired_states_iter),
                    timeout=_DESIRED_EXECUTOR_STATES_TIMEOUT_SEC,
                )
            except asyncio.TimeoutError:
                # These log lines really help to debug networking issues. When there are
                # no networking issues and the fleet is not idle we don't get excessive logging here.
                self._logger.info(
                    f"No desired state received from Server within {_DESIRED_EXECUTOR_STATES_TIMEOUT_SEC} sec, recreating the stream to ensure it is healthy"
                )
                break  # Timeout reached, stream might be unhealthy, exit the loop to recreate the stream.

            self._last_server_clock = new_state.clock
            # Always read the latest desired state value from the stream so
            # we're never acting on stale desired states.
            async with self._last_desired_state_lock:
                self._last_desired_state = new_state
                self._last_desired_state_change_notifier.notify_all()

    async def _reconciliation_loop(self):
        """Continuously reconciles the desired state with the current state.

        Never raises any exceptions. Get cancelled via aio task cancellation."""
        last_reconciled_state: DesiredExecutorState | None = None
        while True:
            async with self._last_desired_state_lock:
                # Comparing object identities (references) is enough here to not reconcile
                # the same state twice.
                while self._last_desired_state is last_reconciled_state:
                    await self._last_desired_state_change_notifier.wait()
                last_reconciled_state = self._last_desired_state

            with metric_state_reconciliation_latency.time():
                metric_state_reconciliations.inc()
                await self._reconcile_state(last_reconciled_state)
                # Update the clock regardless of success or failure.
                # This is to show Server that we actually processed the message.
                self._state_reporter.update_last_server_clock(
                    last_reconciled_state.clock
                )

    async def _reconcile_state(self, desired_state: DesiredExecutorState):
        """Reconciles the desired state with the current state.

        Doesn't raise any exceptions. Logs all errors for future investigation because the gRPC protocol
        doesn't allow us to return errors to the Server if it supplied invalid messages.
        """
        for attempt in range(_RECONCILIATION_RETRIES):
            try:
                # Reconcile FEs first because allocations depend on them.
                self._reconcile_function_executors(desired_state.function_executors)
                self._reconcile_allocations(desired_state.allocations)
                self._reconcile_function_call_results(
                    desired_state.function_call_results
                )
                return
            except Exception as e:
                # `attempt` is 0-based, so the attempts left exclude the current one.
                attempts_left: int = _RECONCILIATION_RETRIES - attempt - 1
                if attempts_left > 0:
                    self._logger.error(
                        "failed to reconcile desired state, retrying in 5 secs",
                        exc_info=e,
                        attempt=attempt,
                        attempts_left=attempts_left,
                    )
                    # Only sleep when another attempt follows; don't delay the final failure.
                    await asyncio.sleep(5)
                else:
                    self._logger.error(
                        "failed to reconcile desired state",
                        exc_info=e,
                        attempt=attempt,
                        attempts_left=attempts_left,
                    )

        metric_state_reconciliation_errors.inc()
        self._logger.error(
            f"failed to reconcile desired state after {_RECONCILIATION_RETRIES} attempts",
        )

    def _reconcile_function_executors(
        self, function_executor_descriptions: Iterable[FunctionExecutorDescription]
    ):
        """Creates controllers for new valid FEs and removes controllers Server forgot about."""
        valid_fe_descriptions: List[FunctionExecutorDescription] = (
            self._valid_function_executor_descriptions(function_executor_descriptions)
        )
        for fe_description in valid_fe_descriptions:
            self._reconcile_function_executor(fe_description)

        seen_fe_ids: Set[str] = set(map(lambda fe: fe.id, valid_fe_descriptions))
        fe_ids_to_remove = set(self._function_executor_controllers.keys()) - seen_fe_ids
        for fe_id in fe_ids_to_remove:
            # Server forgot this FE, so its safe to forget about it now too.
            self._remove_function_executor_controller(fe_id)

    def _valid_function_executor_descriptions(
        self, function_executor_descriptions: Iterable[FunctionExecutorDescription]
    ) -> List[FunctionExecutorDescription]:
        """Returns only the descriptions that pass validation; logs and drops the rest."""
        valid_function_executor_descriptions: List[FunctionExecutorDescription] = []
        for function_executor_description in function_executor_descriptions:
            function_executor_description: FunctionExecutorDescription
            logger = function_executor_logger(
                function_executor_description, self._logger
            )

            try:
                validate_function_executor_description(function_executor_description)
            except ValueError as e:
                logger.error(
                    "received invalid FunctionExecutorDescription from Server, dropping it from desired state",
                    exc_info=e,
                )
                continue

            valid_function_executor_descriptions.append(function_executor_description)

        return valid_function_executor_descriptions

    def _reconcile_function_executor(
        self, function_executor_description: FunctionExecutorDescription
    ):
        """Reconciles a single Function Executor with the desired state.

        Doesn't block on any long running operations. Doesn't raise any exceptions.
        """

        if function_executor_description.id not in self._function_executor_controllers:
            self._add_function_executor_controller(function_executor_description)

    def _add_function_executor_controller(
        self, function_executor_description: FunctionExecutorDescription
    ) -> None:
        """Creates Function Executor for the supplied description and adds it to internal data structures.

        Doesn't block on any long running operations. Doesn't raise any exceptions.
        """
        logger = function_executor_logger(function_executor_description, self._logger)
        try:
            controller: FunctionExecutorController = FunctionExecutorController(
                executor_id=self._executor_id,
                function_executor_description=function_executor_description,
                function_executor_server_factory=self._function_executor_server_factory,
                channel_manager=self._channel_manager,
                state_reporter=self._state_reporter,
                function_call_watch_dispatcher=self._function_call_watch_dispatcher,
                blob_store=self._blob_store,
                cache_path=self._cache_path,
                logger=self._logger,
            )
            self._function_executor_controllers[function_executor_description.id] = (
                controller
            )
            controller.startup()
        except Exception as e:
            logger.error("failed adding Function Executor", exc_info=e)

    def _remove_function_executor_controller(self, function_executor_id: str) -> None:
        """Starts asynchronous shutdown of the FE controller (idempotent)."""
        # Don't remove the FE controller from self._function_executor_controllers until
        # its shutdown is complete. Otherwise, if Server re-adds the FE to desired state
        # before FE shutdown completes then we'll have two FE controllers for the same
        # FE ID which results in many bugs.
        if function_executor_id in self._shutting_down_fe_ids:
            return

        self._shutting_down_fe_ids.add(function_executor_id)
        asyncio.create_task(
            self._shutdown_function_executor_controller(function_executor_id),
            name=f"Shutdown Function Executor {function_executor_id}",
        )

    async def _shutdown_function_executor_controller(
        self, function_executor_id: str
    ) -> None:
        """Shuts down the FE controller and removes it from internal data structures."""
        # We are not cancelling this aio task in self.shutdown(). Because of this the code here should
        # not fail if the FE controller is not found in internal data structures. It can be removed
        # by self.shutdown() at any time while we're running this aio task.
        fe_controller: FunctionExecutorController | None = (
            self._function_executor_controllers.get(function_executor_id)
        )
        if fe_controller is None:
            return

        await fe_controller.shutdown()
        self._function_executor_controllers.pop(function_executor_id, None)
        self._shutting_down_fe_ids.discard(function_executor_id)

    def _reconcile_allocations(self, allocations: Iterable[Allocation]):
        """Adds desired allocations to their FE controllers and cancels undesired ones."""
        valid_allocations: List[Allocation] = self._valid_allocations(allocations)
        for allocation in valid_allocations:
            self._reconcile_allocation(allocation)

        # Cancel allocs that are no longer in the desired state.
        # FE ID => [Allocation ID]
        desired_alloc_ids_per_fe: Dict[str, List[str]] = {}
        for allocation in valid_allocations:
            if allocation.function_executor_id not in desired_alloc_ids_per_fe:
                desired_alloc_ids_per_fe[allocation.function_executor_id] = []
            desired_alloc_ids_per_fe[allocation.function_executor_id].append(
                allocation.allocation_id
            )

        for fe_controller in self._function_executor_controllers.values():
            fe_controller: FunctionExecutorController
            if fe_controller.function_executor_id() in desired_alloc_ids_per_fe:
                desired_fe_alloc_ids: Set[str] = set(
                    desired_alloc_ids_per_fe[fe_controller.function_executor_id()]
                )
            else:
                # No allocations desired for this FE, so cancel all its allocations.
                desired_fe_alloc_ids: Set[str] = set()
            actual_fe_alloc_ids: Set[str] = set(fe_controller.allocation_ids())
            alloc_ids_to_remove: Set[str] = actual_fe_alloc_ids - desired_fe_alloc_ids
            for alloc_id in alloc_ids_to_remove:
                fe_controller.remove_allocation(alloc_id)

    def _reconcile_allocation(self, allocation: Allocation):
        """Reconciles a single Allocation with the desired state.

        Doesn't raise any exceptions. The FE controller is guaranteed to exist
        because _valid_allocations already dropped allocations for unknown FEs.
        """
        function_executor_controller: FunctionExecutorController = (
            self._function_executor_controllers[allocation.function_executor_id]
        )
        if function_executor_controller.has_allocation(allocation.allocation_id):
            # Nothing to do, allocation already exists and it's immutable.
            return

        function_executor_controller.add_allocation(allocation)

    def _valid_allocations(self, allocations: Iterable[Allocation]) -> List[Allocation]:
        """Returns only the allocations that pass validation and target a known FE."""
        valid_allocations: List[Allocation] = []
        for allocation in allocations:
            allocation: Allocation
            logger = allocation_logger(allocation, self._logger)

            try:
                validate_allocation(allocation)
            except ValueError as e:
                # There's no way to report this error to Server so just log it.
                logger.error(
                    "received invalid Allocation from Server, dropping it from desired state",
                    exc_info=e,
                )
                continue

            if (
                allocation.function_executor_id
                not in self._function_executor_controllers
            ):
                logger.error(
                    "received Allocation for a Function Executor that doesn't exist, dropping it from desired state"
                )
                continue

            valid_allocations.append(allocation)

        return valid_allocations

    def _reconcile_function_call_results(
        self, function_call_results: Iterable[FunctionCallResult]
    ):
        """Reconciles the function call results with the watchers.

        Doesn't raise any exceptions. Doesn't block.
        """
        for function_call_result in function_call_results:
            # Filter out function call result updates Executor is not interested in.
            if (
                function_call_result.outcome_code
                not in _terminal_allocation_outcome_codes
            ):
                continue
            if (
                function_call_result.outcome_code
                == AllocationOutcomeCode.ALLOCATION_OUTCOME_CODE_SUCCESS
                and not function_call_result.HasField("return_value")
            ):
                # This function call is waiting for tail call that it returned to resolve to a value.
                continue

            content_derived_key: str = _function_call_watch_key(
                namespace=function_call_result.namespace,
                request_id=function_call_result.request_id,
                function_call_id=function_call_result.function_call_id,
            )
            watch_info: _FunctionCallWatchInfo | None = (
                self._function_call_watchers.get(content_derived_key)
            )
            if watch_info is None:
                self._logger.warning(
                    "no watchers found for function call result, ignoring result",
                    child_fn_call_id=function_call_result.function_call_id,
                )
                continue

            for result_queue in watch_info.result_queues:
                # The queue max size is 1 so if it's full then we already delivered
                # the result to the FE so just skip delivering it again.
                if result_queue.full():
                    continue

                try:
                    result_queue.put_nowait(function_call_result)
                except asyncio.QueueFull:
                    # This should never happen because of the full check above.
                    self._logger.error(
                        "unexpectedly failed to deliver function call result to watcher because the result queue is full",
                        child_fn_call_id=function_call_result.function_call_id,
                    )