import argparse
from multiprocessing.process import BaseProcess
from multiprocessing import connection
from typing import Optional, Any

import vllm
import vllm.envs as envs
from vllm.entrypoints.cli.serve import run_api_server_worker_proc
from vllm.entrypoints.openai.api_server import setup_server
from vllm.entrypoints.utils import cli_env_setup
from vllm.v1.engine.core import EngineCoreProc
from vllm.v1.executor.abstract import Executor
from vllm.v1.utils import APIServerProcessManager
from vllm.v1.engine.coordinator import DPCoordinator
from vllm.v1.engine.utils import CoreEngineProcManager
from vllm.v1.engine.utils import launch_core_engines
from vllm.usage.usage_lib import UsageContext
from vllm.utils import get_tcp_uri

from engine_server.utils.logger import run_log
from engine_server.core.worker import WorkerManager
from engine_server.utils.proc import get_child_processes

class ProcManager:
    """Launches and supervises the process tree of a vLLM deployment.

    Owns up to four groups of child processes, any of which may be absent
    depending on the deployment mode:

    * API server workers (``api_server_manager``) -- absent in headless mode,
    * the data-parallel coordinator (``coordinator``),
    * the engine-core processes (``core_manager``),
    * the per-GPU worker subprocesses (``worker_manager``).

    Typical usage: ``initialize()`` -> ``run()`` -> ``join()``.  ``join()``
    blocks until any supervised process dies and then calls ``shutdown()``.
    """

    def __init__(self, args: argparse.Namespace):
        self.args = args
        # Populated by run(); each manager may legitimately remain None
        # depending on the deployment mode (e.g. no API server manager in
        # headless mode, no coordinator without data parallelism).
        self.api_server_manager: Optional[APIServerProcessManager] = None
        self.worker_manager: Optional[WorkerManager] = None
        self.core_manager: Optional[CoreEngineProcManager] = None
        self.coordinator: Optional[DPCoordinator] = None
        # Lifecycle marker: "init" -> "normal" (running) -> "abnormal" (down).
        self.status: str = "init"

    def _apply_request_adaptor(self):
        """Register the vLLM request-adaptor middleware when API servers will run."""
        if self.args.api_server_count > 0:
            self.args.middleware.append(
                "engine_server.core.vllm.vllm_adaptor.VllmMiddleware")

    def initialize(self):
        """One-time setup: CLI environment plus request middleware wiring."""
        cli_env_setup()
        self._apply_request_adaptor()

    def run(self):
        """Start all child processes for the selected deployment mode."""
        if self.args.headless or self.args.api_server_count < 1:
            self._run_headless()
        else:
            self._run_multi_server()
        self.status = "normal"

    def join(self):
        """Block until any supervised process exits, then tear everything down.

        Raises:
            RuntimeError: when a supervised process dies with a non-zero exit
                code, or when a GPU worker process disappears.
        """
        try:
            sentinel_to_proc: dict[Any, BaseProcess] = {}

            # Headless deployments have no API server manager; only collect
            # sentinels from the managers that actually exist.
            if self.api_server_manager:
                for proc in self.api_server_manager.processes:
                    sentinel_to_proc[proc.sentinel] = proc

            if self.coordinator:
                sentinel_to_proc[self.coordinator.proc.sentinel] = self.coordinator.proc

            if self.core_manager:
                for proc in self.core_manager.processes:
                    sentinel_to_proc[proc.sentinel] = proc

            while sentinel_to_proc:
                # Poll with a timeout so worker health (below) is checked
                # periodically even when no tracked process has exited.
                ready_sentinels: list[Any] = connection.wait(sentinel_to_proc, timeout=5)
                # Process any terminated processes
                for sentinel in ready_sentinels:
                    proc = sentinel_to_proc.pop(sentinel)

                    # Check if process exited with error
                    if proc.exitcode != 0:
                        raise RuntimeError(
                            f"Process {proc.name} (PID: {proc.pid}) "
                            f"died with exit code {proc.exitcode}"
                        )

                if not self.worker_manager:
                    continue
                exited_processes = self.worker_manager.get_exited_processes()
                if len(exited_processes) > 0:
                    raise RuntimeError(f"some worker process exited, {exited_processes}")

        except KeyboardInterrupt:
            run_log.warning("Received KeyboardInterrupt, shutting down all processes...")
        except Exception as e:
            run_log.warning("Exception occurred while engine server: %s", str(e))
            raise
        finally:
            # join() only returns when the deployment is coming down, so the
            # status ends up "abnormal" on every exit path (deliberate).
            self.status = "abnormal"
            run_log.info("Terminating remaining processes ...")
            self.shutdown()

    def shutdown(self):
        """Close every manager that was started; safe to call more than once."""
        run_log.info("shutting down...")
        if self.api_server_manager:
            self.api_server_manager.close()
        if self.coordinator:
            self.coordinator.close()
        if self.core_manager:
            self.core_manager.close()
        if self.worker_manager:
            self.worker_manager.close()

    def _init_worker_manager(self, size: int):
        """Attach a WorkerManager over the engine cores' child processes.

        Args:
            size: expected worker count (pipeline_parallel * tensor_parallel).
                Single-worker deployments (size <= 1) get no WorkerManager.

        Raises:
            RuntimeError: when the discovered child-process count does not
                match the expected multi-worker size.
        """
        child_processes = get_child_processes(self.core_manager.processes)

        if size > 1 and len(child_processes) != size:
            raise RuntimeError(f"Expected {size} worker processes, got {len(child_processes)}")
        if size > 1:
            run_log.info(f"worker processes is: {child_processes}")
            self.worker_manager = WorkerManager(child_processes)

    def _run_headless(self):
        """Launch engine-core processes only (no API servers on this node).

        The engines handshake with a remote head node; only V1 engines with
        local data-parallel engines are supported here.
        """
        if self.args.api_server_count > 1:
            raise ValueError("api_server_count can't be set in headless mode")

        # Create the EngineConfig.
        engine_args = vllm.AsyncEngineArgs.from_cli_args(self.args)
        usage_context = UsageContext.OPENAI_API_SERVER
        vllm_config = engine_args.create_engine_config(
            usage_context=usage_context, headless=True
        )

        if not envs.VLLM_USE_V1:
            raise ValueError("Headless mode is only supported for V1")

        if engine_args.data_parallel_hybrid_lb:
            raise ValueError("data_parallel_hybrid_lb is not applicable in headless mode")

        parallel_config = vllm_config.parallel_config
        local_engine_count = parallel_config.data_parallel_size_local

        if local_engine_count <= 0:
            raise ValueError("data_parallel_size_local must be > 0 in headless mode")

        host = parallel_config.data_parallel_master_ip
        port = engine_args.data_parallel_rpc_port  # add to config too
        handshake_address = get_tcp_uri(host, port)

        run_log.info(
            "Launching %d data parallel engine(s) in headless mode, "
            "with head node address %s.",
            local_engine_count,
            handshake_address,
        )

        self.core_manager = CoreEngineProcManager(
            target_fn=EngineCoreProc.run_engine_core,
            local_engine_count=local_engine_count,
            start_index=vllm_config.parallel_config.data_parallel_rank,
            local_start_index=0,
            vllm_config=vllm_config,
            local_client=False,
            handshake_address=handshake_address,
            executor_class=Executor.get_class(vllm_config),
            log_stats=not engine_args.disable_log_stats,
        )
        self._init_worker_manager(parallel_config.pipeline_parallel_size * parallel_config.tensor_parallel_size)

    def _run_multi_server(self):
        """Launch engine cores plus one or more API server processes.

        With external/hybrid data-parallel load balancing, non-rank-0 nodes
        start their API servers after the engine-launch context exits, using
        the frontend stats-publish address instead of the coordinator's.
        """
        num_api_servers: int = self.args.api_server_count
        listen_address, sock = setup_server(self.args)
        engine_args = vllm.AsyncEngineArgs.from_cli_args(self.args)
        engine_args._api_process_count = num_api_servers
        engine_args._api_process_rank = -1
        usage_context = UsageContext.OPENAI_API_SERVER
        vllm_config = engine_args.create_engine_config(usage_context=usage_context)

        executor_class = Executor.get_class(vllm_config)
        log_stats = not engine_args.disable_log_stats

        parallel_config = vllm_config.parallel_config
        dp_rank = parallel_config.data_parallel_rank
        external_dp_lb = parallel_config.data_parallel_external_lb
        hybrid_dp_lb = parallel_config.data_parallel_hybrid_lb

        # Invariant: a non-zero dp_rank is only valid with external or hybrid
        # data-parallel load balancing.
        assert external_dp_lb or hybrid_dp_lb or dp_rank == 0

        with launch_core_engines(vllm_config, executor_class, log_stats, num_api_servers
                                 ) as (self.core_manager, self.coordinator, addresses):
            api_server_manager_kwargs = dict(
                target_server_fn=run_api_server_worker_proc,
                listen_address=listen_address,
                sock=sock,
                args=self.args,
                num_servers=num_api_servers,
                input_addresses=addresses.inputs,
                output_addresses=addresses.outputs,
                stats_update_address=self.coordinator.get_stats_publish_address()
                if self.coordinator
                else None,
            )
            if dp_rank == 0 or not (external_dp_lb or hybrid_dp_lb):
                self.api_server_manager = APIServerProcessManager(**api_server_manager_kwargs)

        if self.api_server_manager is None:
            # Non-rank-0 node under external/hybrid DP LB: start API servers
            # now, pointed at the frontend stats-publish address.
            api_server_manager_kwargs["stats_update_address"] = (
                addresses.frontend_stats_publish_address
            )
            self.api_server_manager = APIServerProcessManager(**api_server_manager_kwargs)
        self._init_worker_manager(parallel_config.pipeline_parallel_size * parallel_config.tensor_parallel_size)