import os
from multiprocessing.process import BaseProcess
from multiprocessing import connection
from typing import List, Optional, Union, Any

import vllm
from vllm.entrypoints.openai.api_server import setup_server
from vllm.v1.executor.abstract import Executor
from vllm.v1.utils import APIServerProcessManager
from vllm.v1.engine.coordinator import DPCoordinator
from vllm.v1.engine.utils import CoreEngineProcManager, CoreEngineActorManager
from vllm.v1.engine.utils import launch_core_engines
from vllm.entrypoints.cli.serve import run_api_server_worker_proc
from vllm.usage.usage_lib import UsageContext

from engine_server.engine.engine import BaseEngineMgr
from engine_server.engine.vllm.args import parse_args
from engine_server.utils.logger import run_log


def cli_env_setup() -> None:
    """Default VLLM_WORKER_MULTIPROC_METHOD to 'spawn' when it is unset.

    Leaves any value the user already exported untouched; announces the
    default on stdout when it is applied.
    """
    env_key = "VLLM_WORKER_MULTIPROC_METHOD"
    if os.environ.get(env_key) is None:
        print("Setting VLLM_WORKER_MULTIPROC_METHOD to 'spawn'")
        os.environ[env_key] = "spawn"

class VLLMEngineMgr(BaseEngineMgr):
    """Lifecycle manager for a vLLM (v1) OpenAI-compatible serving deployment.

    Orchestrates three groups of children:
      * the core engine processes or Ray actors (``core_manager``),
      * an optional data-parallel coordinator (``coordinator``),
      * one or more OpenAI API server worker processes (``api_server_manager``).

    ``start()`` launches everything and then blocks, monitoring the children
    until one exits or the parent is interrupted; ``stop()`` (and ``start()``'s
    cleanup path) tears everything down.
    """

    def __init__(self, arg_list: List[str]):
        """Parse the CLI-style argument list; nothing is launched yet.

        Args:
            arg_list: Raw argument strings forwarded to the vLLM argument
                parser (``engine_server.engine.vllm.args.parse_args``).
        """
        self.args = parse_args(arg_list)
        # All process managers are created in start(); None until then.
        self.api_server_manager: Optional[APIServerProcessManager] = None
        self.core_manager: Optional[Union[CoreEngineProcManager, CoreEngineActorManager]] = None
        self.coordinator: Optional[DPCoordinator] = None

    def stop(self) -> None:
        """Shut down all child processes started by start().

        Close order: API servers first, then the DP coordinator, then the
        core engine manager. Attributes still None (never started) are
        skipped.
        """
        run_log.info("VLLMEngineMgr stop interface is called")
        if self.api_server_manager:
            self.api_server_manager.close()
        if self.coordinator:
            self.coordinator.close()
        if self.core_manager:
            self.core_manager.close()

    def start(self) -> None:
        """Launch engine cores, coordinator and API servers, then block.

        Blocks in _hang_and_monitoring() until a child dies (RuntimeError)
        or a KeyboardInterrupt arrives; in both cases all remaining children
        are closed before returning.

        Raises:
            RuntimeError: If any monitored child process exits non-zero
                (propagated from _hang_and_monitoring).
        """
        num_api_servers: int = self.args.api_server_count
        # Bind the listen socket up front so the address is known before any
        # API server worker is spawned.
        listen_address, sock = setup_server(self.args)
        engine_args = vllm.AsyncEngineArgs.from_cli_args(self.args)
        engine_args._api_process_count = num_api_servers
        # NOTE(review): rank -1 appears to mark "launcher/parent, not an API
        # worker" — confirm against the vLLM version in use.
        engine_args._api_process_rank = -1
        usage_context = UsageContext.OPENAI_API_SERVER
        vllm_config = engine_args.create_engine_config(usage_context=usage_context)

        executor_class = Executor.get_class(vllm_config)
        log_stats = not engine_args.disable_log_stats

        parallel_config = vllm_config.parallel_config
        dp_rank = parallel_config.data_parallel_rank
        external_dp_lb = parallel_config.data_parallel_external_lb
        hybrid_dp_lb = parallel_config.data_parallel_hybrid_lb

        # Only DP rank 0 may launch this way unless an external or hybrid
        # data-parallel load balancer is in use.
        assert external_dp_lb or hybrid_dp_lb or dp_rank == 0

        # launch_core_engines yields (core_manager, coordinator, addresses);
        # the first two are bound directly onto self so stop() can reach them.
        with launch_core_engines(vllm_config, executor_class, log_stats, num_api_servers
                                 ) as (self.core_manager, self.coordinator, addresses):
            api_server_manager_kwargs = dict(
                target_server_fn=run_api_server_worker_proc,
                listen_address=listen_address,
                sock=sock,
                args=self.args,
                num_servers=num_api_servers,
                input_addresses=addresses.inputs,
                output_addresses=addresses.outputs,
                # The coordinator only exists in data-parallel setups.
                stats_update_address=self.coordinator.get_stats_publish_address()
                if self.coordinator
                else None,
            )
            # Rank 0 (or every rank when no external/hybrid LB is used)
            # starts its API servers while the engines are coming up.
            if dp_rank == 0 or not (external_dp_lb or hybrid_dp_lb):
                self.api_server_manager = APIServerProcessManager(**api_server_manager_kwargs)

        # Non-zero ranks under an external/hybrid LB start their API servers
        # only after the engines are up, pointing stats updates at the
        # frontend publish address instead of the local coordinator.
        if self.api_server_manager is None:
            api_server_manager_kwargs["stats_update_address"] = (
                addresses.frontend_stats_publish_address
            )
            self.api_server_manager = APIServerProcessManager(**api_server_manager_kwargs)

        self._hang_and_monitoring()

    def _hang_and_monitoring(self) -> None:
        """Block until a monitored child exits, then tear everything down.

        Watches the API server processes, the coordinator process (if any)
        and locally-spawned core engine processes via their multiprocessing
        sentinels; Ray-actor engines are polled with ray.wait() instead. A
        child exiting non-zero raises RuntimeError; KeyboardInterrupt is
        treated as a normal shutdown request (logged, not re-raised). All
        remaining children are closed in the finally block either way.

        Raises:
            RuntimeError: If any monitored process exits with a non-zero
                exit code.
        """
        try:
            # Map each sentinel back to its process so a ready sentinel can
            # be resolved to the child it belongs to.
            sentinel_to_proc: dict[Any, BaseProcess] = {
                proc.sentinel: proc for proc in self.api_server_manager.processes
            }

            if self.coordinator:
                sentinel_to_proc[self.coordinator.proc.sentinel] = self.coordinator.proc

            actor_run_refs = []
            if isinstance(self.core_manager, CoreEngineProcManager):
                # Local engine processes are watched via sentinels too.
                for proc in self.core_manager.processes:
                    sentinel_to_proc[proc.sentinel] = proc
            elif isinstance(self.core_manager, CoreEngineActorManager):
                # Ray actors have no sentinel; poll their run refs instead.
                actor_run_refs = self.core_manager.get_run_refs()

            while sentinel_to_proc or actor_run_refs:
                # Wait (up to 5s) for any watched process to terminate;
                # connection.wait() iterates the dict's keys (the sentinels).
                ready_sentinels: list[Any] = connection.wait(sentinel_to_proc, timeout=5)

                # Process any terminated processes
                for sentinel in ready_sentinels:
                    proc = sentinel_to_proc.pop(sentinel)

                    # Check if process exited with error
                    if proc.exitcode != 0:
                        raise RuntimeError(
                            f"Process {proc.name} (PID: {proc.pid}) "
                            f"died with exit code {proc.exitcode}"
                        )

                if actor_run_refs:
                    # Imported lazily so non-Ray deployments never need ray.
                    import ray

                    # Completed refs are dropped; keep polling the rest.
                    _, actor_run_refs = ray.wait(actor_run_refs, timeout=5)

        except KeyboardInterrupt:
            run_log.warn("Received KeyboardInterrupt, shutting down API servers...")
        except Exception as e:
            run_log.warn("Exception occurred while running API servers: %s", str(e))
            raise
        finally:
            run_log.info("Terminating remaining processes ...")
            # Same close order as stop(): API servers, coordinator, engines.
            if self.api_server_manager:
                self.api_server_manager.close()
            if self.coordinator:
                self.coordinator.close()
            if self.core_manager:
                self.core_manager.close()