# Adapted from https://github.com/vllm-project/vllm/tests/v1/kv_connector/nixl_integration/toy_proxy_server.py

# SPDX-License-Identifier: Apache-2.0

import argparse
import asyncio
import functools
import heapq
import logging
import os
import sys
import uuid
import threading
import time
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import List, Dict, Tuple, Optional

import httpx
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import StreamingResponse


MAX_INT = sys.maxsize # the max int value on the machine

# Header constants for node override
HEADER_PREFILL_POD_ADDRESS_PORT = "x-openfuyao-prefill-pod-address-port"
HEADER_DECODE_POD_ADDRESS_PORT = "x-openfuyao-decode-pod-address-port"


@dataclass
class ServiceRequest:
    """Bundle of parameters for one proxied request to a P/D backend."""
    # HTTP client bound to the target server's base URL.
    client: httpx.AsyncClient
    # Index ("host:port") of the prefiller chosen for this request.
    prefiller_id: str
    # Path to call on the backend, e.g. "/v1/completions".
    endpoint: str
    # JSON body forwarded to the backend.
    req_data: dict
    # Proxy-assigned unique request id (sent as the X-Request-Id header).
    request_id: str
    # Retry policy: number of attempts and exponential-backoff base delay (s).
    max_retries: int = 3
    base_delay: float = 0.2


# Configure basic logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# K8s client imports
try:
    from kubernetes import client, config
    K8S_AVAILABLE = True
except ImportError:
    K8S_AVAILABLE = False
    logger.warning("Warning: kubernetes package not available. Install with: pip install kubernetes")


# Add uvloop for faster event loop if available
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass


class VllmServiceDiscovery:
    """Periodically discovers vLLM prefiller/decoder pods in Kubernetes.

    A daemon thread polls the K8s API every ``discovery_interval`` seconds
    and maintains two lists of ``(pod_ip, port)`` tuples, which callers read
    through the lock-protected getters.
    """

    def __init__(self, args):
        """Initialize from parsed CLI args and start background discovery.

        Args:
            args: argparse.Namespace carrying label selectors, namespace,
                container/port-name hints and the discovery interval.
        """
        # Label selectors (comma-separated, AND logic) used to find pods.
        self.prefiller_labels = args.prefiller_labels
        self.decoder_labels = args.decoder_labels
        self.namespace = args.namespace
        self.discovery_interval = args.discovery_interval
        # Container / named-port hints for resolving each pod's port.
        self.prefiller_container = args.prefiller_container
        self.decoder_container = args.decoder_container
        self.prefiller_port_name = args.prefiller_port_name
        self.decoder_port_name = args.decoder_port_name
        self.prefiller_instances: List[Tuple[str, int]] = []
        self.decoder_instances: List[Tuple[str, int]] = []
        self.running = False
        self.discovery_thread = None
        # Guards prefiller_instances/decoder_instances across threads.
        self.lock = threading.Lock()

        # Initialize K8s client: prefer in-cluster config, fall back to a
        # local kubeconfig; if neither loads, discovery stays disabled and
        # this object keeps empty instance lists.
        if K8S_AVAILABLE:
            try:
                config.load_incluster_config()
            except Exception:
                try:
                    config.load_kube_config()
                except Exception:
                    logger.warning("Warning: Could not load K8s config. Service discovery will not work.")
                    return

            self.v1 = client.CoreV1Api()
            self.start_discovery()
        else:
            logger.warning("Warning: K8s client not available. Service discovery will not work.")

    @staticmethod
    def _get_container_port(pod, container_name: str, port_name: str) -> int:
        """Resolve the serving port for *pod*.

        Finds the container matching *container_name* (or the first
        container when None), prefers a port whose lowercased name equals
        *port_name*, and falls back to the container's first declared port.
        On any failure a default is returned: 8100 for pods whose name
        contains "prefill", 8200 otherwise.
        """
        default_port = 8100 if "prefill" in pod.metadata.name.lower() else 8200
        try:
            # If container_name is not specified, use the first container
            containers = pod.spec.containers
            if not containers:
                return default_port

            target_container = containers[0] if container_name is None else None
            if container_name:
                for container in containers:
                    if container.name == container_name:
                        target_container = container
                        break

            if target_container and target_container.ports:
                # Prioritize finding named ports (port aliases)
                # NOTE(review): the pod's port name is lowercased but
                # *port_name* is compared as-is — confirm callers always
                # pass lowercase names (the CLI defaults are lowercase).
                for port in target_container.ports:
                    if hasattr(port, 'name') and port.name:
                        curr_port_name = port.name.lower()
                        if curr_port_name == port_name:
                            logger.debug("Found named port '%s' with value %s for container %s", 
                                port.name, port.container_port, target_container.name)
                            return port.container_port

                logger.debug("Using first port %s for container %s", 
                    target_container.ports[0].container_port, target_container.name)
                return target_container.ports[0].container_port

            # If no port is found, return default port
            return default_port

        except Exception as e:
            logger.warning("Failed to get container port for pod %s: %s", pod.metadata.name, e)
            # Determine default port based on pod name
            return default_port

    def _discover_services(self):
        """Query the K8s API for Running pods matching the label selectors
        and atomically replace both instance lists under the lock.
        Failures for either side are logged and leave that side's new
        list empty for this round."""
        try:
            if not K8S_AVAILABLE:
                return

            # Discover prefiller services
            prefiller_instances = []
            try:
                if hasattr(self, 'v1'):
                    pods = self.v1.list_namespaced_pod(
                        namespace=self.namespace,
                        label_selector=self.prefiller_labels  # Format: "label1=value1,label2=value2" (AND logic)
                    )
                    for pod in pods.items:
                        if pod.status.phase == 'Running':

                            pod_ip = pod.status.pod_ip
                            port = self._get_container_port(pod, self.prefiller_container, self.prefiller_port_name)
                            prefiller_instances.append((pod_ip, port))
            except Exception as e:
                logger.warning("Failed to discover prefiller services: %s", e)

            # Discover decoder services
            decoder_instances = []
            try:
                if hasattr(self, 'v1'):
                    pods = self.v1.list_namespaced_pod(
                        namespace=self.namespace,
                        label_selector=self.decoder_labels  # Format: "label1=value1,label2=value2" (AND logic)
                    )
                    for pod in pods.items:
                        if pod.status.phase == 'Running':
                            pod_ip = pod.status.pod_ip
                            # Get container port
                            port = self._get_container_port(pod, self.decoder_container, self.decoder_port_name)
                            decoder_instances.append((pod_ip, port))
            except Exception as e:
                logger.warning("Failed to discover decoder services: %s", e)

            with self.lock:
                self.prefiller_instances = prefiller_instances
                self.decoder_instances = decoder_instances

            logger.debug("Service discovery completed: %d prefiller, %d decoder instances", 
                len(prefiller_instances), len(decoder_instances))

        except Exception as e:
            logger.error("Service discovery error: %s", e)

    def _discovery_loop(self):
        """Daemon-thread body: poll then sleep ``discovery_interval``
        seconds, until ``self.running`` is cleared."""
        while self.running:
            try:
                self._discover_services()
            except Exception as e:
                logger.error("Service discovery loop error: %s", e)

            if self.running:
                time.sleep(self.discovery_interval)

    def start_discovery(self):
        """Start the background discovery thread (no-op if already running)."""
        if self.running:
            return

        self.running = True
        self.discovery_thread = threading.Thread(target=self._discovery_loop, daemon=True)
        self.discovery_thread.start()
        logger.info("vllm service discovery started")

    def stop_discovery(self):
        """Signal the discovery thread to stop and join it (5s timeout)."""
        self.running = False
        if self.discovery_thread:
            self.discovery_thread.join(timeout=5)
        logger.info("K8s service discovery stopped")

    def get_prefiller_instances(self) -> List[Tuple[str, int]]:
        """Return a snapshot copy of the current prefiller (ip, port) list."""
        with self.lock:
            return self.prefiller_instances.copy()

    def get_decoder_instances(self) -> List[Tuple[str, int]]:
        """Return a snapshot copy of the current decoder (ip, port) list."""
        with self.lock:
            return self.decoder_instances.copy()


class ServerState:
    """Per-backend bookkeeping: a persistent HTTP client plus live load
    counters used by the proxy's scheduling heaps."""

    def __init__(self, host, port):
        self.host = host
        self.port = port
        # Base URL without the /v1 suffix; endpoints supply their own path.
        self.url = f'http://{host}:{port}'
        connection_limits = httpx.Limits(max_connections=100000,
                                         max_keepalive_connections=100000)
        self.client = httpx.AsyncClient(timeout=None,
                                        base_url=self.url,
                                        limits=connection_limits)
        # Load counters consumed by ProxyState's priority computation.
        self.active_tokens = 0
        self.active_kv_cache = 0  # Only meaningful for prefiller nodes.
        self.active_requests = 0  # Count of in-flight requests.
        # Request ids whose KV cache should be released on the prefiller;
        # no per-server lock — callers coordinate via ProxyState's locks.
        self.aborted_requests = set()


class ProxyState:
    """Central routing state for the P/D proxy.

    Keeps one ServerState per prefiller/decoder instance plus two min-heaps
    keyed by a load-based priority so the least-loaded instance can be
    selected in O(log n).  ``refresh_instances`` reconciles this state with
    the latest service-discovery results.
    """

    def __init__(self, service_discovery: VllmServiceDiscovery):
        self.service_discovery = service_discovery
        self.prefillers: Dict[str, ServerState] = {}
        self.decoders: Dict[str, ServerState] = {}
        # (host, port) -> non-zero marker for instances known to be alive;
        # used to diff each discovery round against the previous one.
        self.prefill_instances_map: Dict[Tuple[str, int], int] = {}
        self.decode_instances_map: Dict[Tuple[str, int], int] = {}
        self.req_to_prefiller = {}
        self.req_id_lock = asyncio.Lock()
        # Protects the heaps and instance maps: the refresh runs from a
        # background task while selection happens on the event loop.
        self.instances_lock = threading.Lock()
        self.prefiller_heap = []
        self.decoder_heap = []
        heapq.heapify(self.prefiller_heap)
        heapq.heapify(self.decoder_heap)
        self._update_instances()

    def _update_instances(self):
        """Diff discovered instances against the known ones.

        New instances are pushed onto the heaps with priority 0; instances
        that disappeared are removed from the server maps AND purged from
        the heaps so they can no longer be selected.
        """
        with self.instances_lock:
            prefiller_instances = self.service_discovery.get_prefiller_instances()
            decoder_instances = self.service_discovery.get_decoder_instances()

            new_prefiller_instances = []
            new_decoder_instances = []

            # Start every previously-known instance at 0 ("not seen yet").
            _prefill_instances_map = {instance: 0 for instance in self.prefill_instances_map}
            _decode_instances_map = {instance: 0 for instance in self.decode_instances_map}

            for instance in prefiller_instances:
                if instance not in _prefill_instances_map:
                    new_prefiller_instances.append(instance)
                _prefill_instances_map[instance] = 1

            for instance in decoder_instances:
                if instance not in _decode_instances_map:
                    new_decoder_instances.append(instance)
                _decode_instances_map[instance] = 2

            # Add new instances to the priority queues.
            for h, p in new_prefiller_instances:
                server = ServerState(h, p)
                index = f"{h}:{p}"
                self.prefillers[index] = server
                heapq.heappush(self.prefiller_heap, (0, index, server))  # type: ignore
            for h, p in new_decoder_instances:
                server = ServerState(h, p)
                index = f"{h}:{p}"
                self.decoders[index] = server
                heapq.heappush(self.decoder_heap, (0, index, server))  # type: ignore
            logger.info("Added new instances to the priority queues, prefiller: %s, decoder: %s", 
                new_prefiller_instances, new_decoder_instances)

            # Drop instances that were not seen in this discovery round.
            removed_prefillers = False
            for instance, status in _prefill_instances_map.items():
                if status == 0:
                    index = f"{instance[0]}:{instance[1]}"
                    server = self.prefillers.pop(index)
                    server.active_tokens = MAX_INT
                    removed_prefillers = True
                    logger.info("Removed prefiller instance from the priority queues, prefiller: %s", instance)
            removed_decoders = False
            for instance, status in _decode_instances_map.items():
                if status == 0:
                    index = f"{instance[0]}:{instance[1]}"
                    server = self.decoders.pop(index)
                    server.active_tokens = MAX_INT
                    removed_decoders = True
                    logger.info("Removed decoder instance from the priority queues, decoder: %s", instance)

            # BUGFIX: purge the heap entries of removed instances.
            # Previously only server.active_tokens was set to MAX_INT,
            # but heap ordering uses the *stored* priority, so
            # select_prefiller()/select_decoder() could pop an index that
            # is no longer in self.prefillers/self.decoders -> KeyError.
            if removed_prefillers:
                self.prefiller_heap = [entry for entry in self.prefiller_heap
                                       if entry[1] in self.prefillers]
                heapq.heapify(self.prefiller_heap)
            if removed_decoders:
                self.decoder_heap = [entry for entry in self.decoder_heap
                                     if entry[1] in self.decoders]
                heapq.heapify(self.decoder_heap)

            self.prefill_instances_map = {k: v for k, v in _prefill_instances_map.items() if v != 0}
            self.decode_instances_map = {k: v for k, v in _decode_instances_map.items() if v != 0}
            logger.debug("Updated priority queues successfully")

    def refresh_instances(self):
        """Refresh service instances (get latest information from service discovery)."""
        self._update_instances()

    def _update_prefiller_priority(self, server_idx: str):
        """Recompute and reposition a prefiller's heap entry."""
        with self.instances_lock:
            server = self.prefillers[server_idx]
            # Priority blends active tokens with pending KV-cache weight.
            priority = server.active_tokens + server.active_kv_cache * 0.3
            # Remove the old entry; filtering a heap list breaks the heap
            # invariant, so re-heapify before pushing (BUGFIX: previously
            # heappush was called on the unordered filtered list).
            self.prefiller_heap = [(p, i, s) for p, i, s in self.prefiller_heap
                                   if i != server_idx]
            heapq.heapify(self.prefiller_heap)
            heapq.heappush(self.prefiller_heap,
                           (priority, server_idx, server))  # type: ignore

    def _update_decoder_priority(self, server_idx: str):
        """Recompute and reposition a decoder's heap entry."""
        with self.instances_lock:
            server = self.decoders[server_idx]
            priority = server.active_tokens
            # Remove the old entry and restore the heap invariant before
            # pushing (see _update_prefiller_priority).
            self.decoder_heap = [(p, i, s) for p, i, s in self.decoder_heap
                                 if i != server_idx]
            heapq.heapify(self.decoder_heap)
            heapq.heappush(self.decoder_heap,
                           (priority, server_idx, server))  # type: ignore

    def abort_prefiller_request(self, server_idx: str,
                                request_id):  # Changed to synchronous
        """
        Mark a request as aborted. This will helps to release kv cache in
        prefiller node.
        """
        # No lock needed - atomic operation
        self.prefillers[server_idx].aborted_requests.add(request_id)

    def aquire_aborted_prefiller_requests(
            self, server_idx: str):  # Changed to synchronous
        """
        Get the set of aborted requests and clear it.
        This is used to release kv cache in prefiller node.
        """
        # No lock needed - atomic operation
        aborted_requests = self.prefillers[server_idx].aborted_requests.copy()
        self.prefillers[server_idx].aborted_requests.clear()
        return aborted_requests

    async def next_req_id(self):
        """Generate a unique request id (uuid4)."""
        async with self.req_id_lock:
            return str(uuid.uuid4())

    def determine_next_prefiller(self) -> str:
        """Peek at the least-loaded prefiller without selecting it."""
        if not self.prefiller_heap:
            raise RuntimeError("No prefiller servers available")

        # Peek at the top of the heap without removing it
        priority, chosen, server = self.prefiller_heap[0]

        return chosen

    def find_prefiller_by_address(self, address: str, port: int) -> Optional[str]:
        """Return the prefiller index for (address, port), or None."""
        idx = f"{address}:{port}"
        if idx in self.prefillers:
            return idx
        return None

    def find_decoder_by_address(self, address: str, port: int) -> Optional[str]:
        """Return the decoder index for (address, port), or None."""
        idx = f"{address}:{port}"
        if idx in self.decoders:
            return idx
        return None

    def select_prefiller_by_address(self, address: str, port: int, token_count: float) -> str:
        """Select a specific prefiller by address, charging it *token_count*.

        Raises RuntimeError if the requested server is unknown.
        """
        chosen = self.find_prefiller_by_address(address, port)
        if chosen is None:
            raise RuntimeError(f"Prefiller server {address}:{port} not found")

        # Update the server atomically
        self.prefillers[chosen].active_tokens += token_count
        self.prefillers[chosen].active_kv_cache += token_count

        # Update priority and re-add to heap
        self._update_prefiller_priority(chosen)

        return chosen

    def select_decoder_by_address(self, address: str, port: int, token_count: float) -> str:
        """Select a specific decoder by address, charging it *token_count*.

        Raises RuntimeError if the requested server is unknown.
        """
        chosen = self.find_decoder_by_address(address, port)
        if chosen is None:
            raise RuntimeError(f"Decoder server {address}:{port} not found")

        # Update the server atomically
        self.decoders[chosen].active_tokens += token_count

        # Update priority and re-add to heap
        self._update_decoder_priority(chosen)

        return chosen

    def select_prefiller(self, token_count):  # Changed to synchronous
        """Pop the least-loaded prefiller and charge it *token_count*."""
        if not self.prefiller_heap:
            raise RuntimeError("No prefiller servers available")

        priority, chosen, server = heapq.heappop(self.prefiller_heap)

        # Update the chosen server atomically
        self.prefillers[chosen].active_tokens += token_count
        self.prefillers[chosen].active_kv_cache += token_count

        # Update priority and re-add to heap
        self._update_prefiller_priority(chosen)

        return chosen

    def release_prefiller(self, idx, token_count):  # Changed to synchronous
        """Credit back *token_count* tokens after the prefill completes."""
        self.prefillers[idx].active_tokens -= token_count
        # Update priority queue after releasing
        self._update_prefiller_priority(idx)

    def release_prefiller_kv(self, idx, token_count):  # Changed to synchronous
        """Credit back KV-cache load once the cache has been transferred."""
        if self.prefillers[idx].active_kv_cache > 0:
            self.prefillers[idx].active_kv_cache -= token_count
        # Update priority queue after releasing
        self._update_prefiller_priority(idx)

    def select_decoder(self, token_count):  # Changed to synchronous
        """Pop the least-loaded decoder and charge it *token_count*."""
        if not self.decoder_heap:
            raise RuntimeError("No decoder servers available")

        priority, chosen, server = heapq.heappop(self.decoder_heap)

        # Update the chosen server atomically
        self.decoders[chosen].active_tokens += token_count

        # Update priority and re-add to heap
        self._update_decoder_priority(chosen)

        return chosen

    def release_decoder(self, idx, token_count):  # Changed to synchronous
        """Credit back *token_count* tokens after the decode completes."""
        self.decoders[idx].active_tokens -= token_count
        # Update priority queue after releasing
        self._update_decoder_priority(idx)

    # Omni_infer's calculate_input_scores function
    @staticmethod
    def calculate_prefill_scores(request_length: int) -> float:
        """Estimate prefill cost from the request byte length
        (empirical linear model)."""
        length_score = request_length / 4.0
        input_score = length_score * 0.0345 + 120.0745
        return input_score

    @staticmethod
    def calculate_decode_scores(request_length: int) -> float:
        """Decode cost is modeled as proportional to request length."""
        return request_length


# Global ProxyState singleton; created in lifespan() at application startup.
proxy_state = None


def parse_args():
    """Parse CLI arguments and expand host/port shorthand options.

    ``--*-hosts-num`` repeats each host N times and ``--*-ports-inc``
    expands each port into a consecutive range, so large symmetric
    deployments can be described compactly.  Also precomputes
    ``args.prefiller_instances`` / ``args.decoder_instances`` as
    (host, port) pairs.

    Raises:
        ValueError: when paired list options have mismatched lengths.
    """

    def _require_same_length(opt, base, message):
        # Optional nargs="+" lists must line up with their base list.
        if opt is not None and len(opt) != len(base):
            raise ValueError(message)

    def _repeat_hosts(hosts, counts):
        # ["h1", "h2"], [2, 1] -> ["h1", "h1", "h2"]
        return [host for host, num in zip(hosts, counts) for _ in range(num)]

    def _expand_ports(ports, increments):
        # [8000], [3] -> [8000, 8001, 8002]
        return [int(port) + i
                for port, inc in zip(ports, increments)
                for i in range(inc)]

    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--prefiller-hosts",
                        type=str,
                        nargs="+",
                        default=["localhost"])
    parser.add_argument("--prefiller-hosts-num",
                        type=int,
                        nargs="+",
                        default=None)
    parser.add_argument("--prefiller-ports",
                        type=int,
                        nargs="+",
                        default=[8001])
    parser.add_argument("--prefiller-ports-inc",
                        type=int,
                        nargs="+",
                        default=None)
    parser.add_argument("--decoder-hosts",
                        type=str,
                        nargs="+",
                        default=["localhost"])
    parser.add_argument("--decoder-hosts-num",
                        type=int,
                        nargs="+",
                        default=None)
    parser.add_argument("--decoder-ports", type=int, nargs="+", default=[8002])
    parser.add_argument("--decoder-ports-inc",
                        type=int,
                        nargs="+",
                        default=None)
    parser.add_argument("--prefiller-labels", type=str, default="app=prefill",
                        help=("K8s label selector for prefiller pods (comma-separated, AND logic, "
                              "e.g., 'label1=xxx,label2=xxx' means pod must have both labels)"))
    parser.add_argument("--decoder-labels", type=str, default="app=decode",
                        help=("K8s label selector for decoder pods (comma-separated, AND logic, "
                              "e.g., 'label1=xxx,label2=xxx' means pod must have both labels)"))
    parser.add_argument("--prefiller-container", type=str, default="prefill-engine",
                        help="Container name for prefiller pods")
    parser.add_argument("--decoder-container", type=str, default="decode-engine",
                        help="Container name for decoder pods")
    parser.add_argument("--prefiller-port-name", type=str, default="prefill-port",
                        help="Port name for prefiller pods")
    parser.add_argument("--decoder-port-name", type=str, default="decode-port",
                        help="Port name for decoder pods")
    parser.add_argument("--namespace", type=str, default="default",
                        help="K8s namespace for service discovery")
    parser.add_argument("--discovery-interval", type=int, default=10,
                        help="Service discovery interval in seconds")
    parser.add_argument("--max-retries",
                        type=int,
                        default=3,
                        help="Maximum number of retries for HTTP requests")
    parser.add_argument(
        "--retry-delay",
        type=float,
        default=0.001,
        help="Base delay (seconds) for exponential backoff retries")
    args = parser.parse_args()

    if len(args.prefiller_hosts) != len(args.prefiller_ports):
        raise ValueError(
            "Number of prefiller hosts must match number of prefiller ports")
    if len(args.decoder_hosts) != len(args.decoder_ports):
        raise ValueError(
            "Number of decoder hosts must match number of decoder ports")
    _require_same_length(
        args.prefiller_hosts_num, args.prefiller_hosts,
        "Number of prefiller hosts num must match number of prefiller hosts")
    _require_same_length(
        args.prefiller_ports_inc, args.prefiller_ports,
        "Number of prefiller ports inc must match number of prefiller ports")
    _require_same_length(
        args.decoder_hosts_num, args.decoder_hosts,
        "Number of decoder hosts num must match number of decoder hosts")
    _require_same_length(
        args.decoder_ports_inc, args.decoder_ports,
        "Number of decoder ports inc must match number of decoder ports")

    if args.prefiller_hosts_num is not None:
        args.prefiller_hosts = _repeat_hosts(args.prefiller_hosts,
                                             args.prefiller_hosts_num)
    if args.prefiller_ports_inc is not None:
        args.prefiller_ports = _expand_ports(args.prefiller_ports,
                                             args.prefiller_ports_inc)

    if args.decoder_hosts_num is not None:
        args.decoder_hosts = _repeat_hosts(args.decoder_hosts,
                                           args.decoder_hosts_num)
    if args.decoder_ports_inc is not None:
        args.decoder_ports = _expand_ports(args.decoder_ports,
                                           args.decoder_ports_inc)

    args.prefiller_instances = list(
        zip(args.prefiller_hosts, args.prefiller_ports))
    args.decoder_instances = list(zip(args.decoder_hosts, args.decoder_ports))
    return args


@asynccontextmanager
async def lifespan(fastapi_app: FastAPI):
    """FastAPI lifespan: build service discovery and the global ProxyState,
    run a periodic instance-refresh task, and tear everything down on exit.
    """
    global proxy_state
    service_discovery = VllmServiceDiscovery(global_args)
    proxy_state = ProxyState(service_discovery)
    # Lazy %-style args for consistency with the module's logging style
    # (the original f-string formatted eagerly even when INFO is disabled).
    logger.info(
        "Initialized %d prefill clients and %d decode clients.",
        len(proxy_state.prefillers), len(proxy_state.decoders))

    # Start periodic refresh task
    async def refresh_loop():
        # Keep ProxyState in sync with the latest discovery results.
        while True:
            await asyncio.sleep(global_args.discovery_interval)
            try:
                proxy_state.refresh_instances()
            except Exception as e:
                logger.error("Error refreshing instances: %s", e)

    refresh_task = asyncio.create_task(refresh_loop())

    yield

    # Shutdown: cancel the refresh task, stop the discovery thread, and
    # close every backend HTTP client.
    refresh_task.cancel()
    try:
        await refresh_task
    except asyncio.CancelledError:
        pass

    service_discovery.stop_discovery()
    for p in proxy_state.prefillers.values():
        await p.client.aclose()
    for d in proxy_state.decoders.values():
        await d.client.aclose()


async def listen_for_disconnect(request: Request) -> None:
    """Block until the ASGI channel reports that the client disconnected."""
    while (message := await request.receive())["type"] != "http.disconnect":
        continue


def with_cancellation(handler_func):
    """Decorator: race the handler against a client-disconnect watcher.

    Whichever task finishes first wins and the other is cancelled; the
    handler's result is returned, or None if the client disconnected.
    """

    @functools.wraps(handler_func)
    async def wrapper(*args, **kwargs):
        request = kwargs["request"]
        handler = asyncio.create_task(handler_func(*args, **kwargs))
        watcher = asyncio.create_task(listen_for_disconnect(request))
        done, pending = await asyncio.wait({handler, watcher},
                                           return_when=asyncio.FIRST_COMPLETED)
        for leftover in pending:
            leftover.cancel()
        return handler.result() if handler in done else None

    return wrapper


# FastAPI application; lifespan() wires up service discovery and ProxyState.
app = FastAPI(lifespan=lifespan)


async def send_request_to_service(request: ServiceRequest):
    """Send a non-streaming prefill call to the chosen prefiller, retrying
    with exponential backoff.

    Piggybacks the set of aborted request ids in ``kv_transfer_params`` so
    the prefiller can free their KV cache.  Raises the last httpx error
    after ``max_retries`` failed attempts.
    """
    aborted = proxy_state.aquire_aborted_prefiller_requests(
        request.prefiller_id)

    payload = request.req_data.copy()
    payload['kv_transfer_params'] = {
        "do_remote_decode": True,
        "do_remote_prefill": False,
        "remote_engine_id": None,
        "remote_block_ids": None,
        "remote_host": None,
        "remote_port": None,
        "aborted_request": list(aborted),
    }
    # Prefill-only call: no streaming, exactly one generated token.
    payload["stream"] = False
    payload["max_tokens"] = 1
    payload.pop("stream_options", None)

    headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request.request_id
    }

    last_exc = None
    for attempt in range(1, request.max_retries + 1):
        try:
            response = await request.client.post(request.endpoint,
                                                 json=payload,
                                                 headers=headers)
            response.raise_for_status()
            return response
        except (httpx.RequestError, httpx.HTTPStatusError) as e:
            logger.warning(
                "Attempt %s failed for %s: %s", attempt, request.endpoint, str(e))
            last_exc = e
            if attempt >= request.max_retries:
                logger.error(
                    "All %s attempts failed for %s.", request.max_retries, request.endpoint)
                raise last_exc from e
            await asyncio.sleep(request.base_delay * (2 ** (attempt - 1)))


async def forward_metrics_request(prefiller_idx: str,
                                  request: Request,
                                  max_retries: int = 3,
                                  base_delay: float = 0.2):
    """Forward a /metrics GET to the given prefiller with retries.

    The upstream body is fully read while the httpx stream is still open
    and then replayed to the caller as a StreamingResponse.  Raises the
    last httpx error after ``max_retries`` failed attempts.
    """
    prefiller = proxy_state.prefillers[prefiller_idx]

    # Copy the incoming headers, dropping ones that must not be forwarded.
    headers = dict(request.headers)
    headers.pop("host", None)
    headers.pop("content-length", None)

    last_exc = None
    for attempt in range(1, max_retries + 1):
        try:
            async with prefiller.client.stream("GET", "/metrics", headers=headers) as response:
                response.raise_for_status()
                # BUGFIX: capture the body BEFORE the context manager
                # closes the response.  Previously the StreamingResponse
                # iterated response.aiter_bytes() after `async with` had
                # exited, implicitly relying on httpx replaying the
                # already-aread() content from memory.
                response_body = await response.aread()
                media_type = response.headers.get("content-type", "application/json")

            # The body is already in memory; replay it as a single chunk.
            return StreamingResponse(iter([response_body]), media_type=media_type)

        except (httpx.RequestError, httpx.HTTPStatusError) as e:
            logger.warning(
                "Attempt %s failed for metrics on prefiller %s: %s", attempt, prefiller_idx, str(e))
            last_exc = e
            if attempt < max_retries:
                await asyncio.sleep(base_delay * (2**(attempt - 1)))
            else:
                logger.error(
                    "All %s attempts failed for metrics on prefiller %s.", max_retries, prefiller_idx)
                raise last_exc from e


async def stream_service_response_with_retry(request: ServiceRequest):
    """Stream a backend response to the client, retrying failures that
    occur BEFORE the first byte has been sent.

    Once any chunk has reached the client, a retry would replay the
    response from the start and corrupt the client's stream, so mid-stream
    failures are logged and the stream is dropped instead.
    """
    headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request.request_id
    }
    # Tracked across attempts (also removes the old `in locals()` hack).
    first_chunk_sent = False
    for attempt in range(1, request.max_retries + 1):
        try:
            async with request.client.stream("POST",
                                     request.endpoint,
                                     json=request.req_data,
                                     headers=headers) as response:
                response.raise_for_status()
                async for chunk in response.aiter_bytes():
                    first_chunk_sent = True
                    yield chunk
                return  # Success, exit after streaming
        except Exception as e:
            # BUGFIX: the httpx-specific branch previously retried even
            # after chunks had been yielded, duplicating output to the
            # client; all exceptions now honor the first-chunk guard.
            if first_chunk_sent:
                logger.error(
                    "Streaming to client interrupted after response started: %s", str(e))
                return
            if attempt < request.max_retries:
                logger.warning(
                    "Attempt %s failed for streaming %s: %s", attempt, request.endpoint, str(e))
                await asyncio.sleep(request.base_delay * (2**(attempt - 1)))
            else:
                logger.error(
                    "All %s attempts failed for streaming %s.", request.max_retries, request.endpoint)
                raise e


def extract_node_overrides(
        request: Request) -> Optional[Tuple[Tuple[str, int], Tuple[str, int]]]:
    """Parse user-specified prefiller/decoder (host, port) pairs from headers.

    An override is returned only when BOTH headers are present and both
    parse successfully. Missing headers mean "no override" (returns None);
    malformed headers are a client error (HTTP 400).
    """
    prefill_raw = request.headers.get(HEADER_PREFILL_POD_ADDRESS_PORT)
    decode_raw = request.headers.get(HEADER_DECODE_POD_ADDRESS_PORT)

    # The override only applies when both node headers are supplied together.
    if not prefill_raw or not decode_raw:
        return None

    try:
        p_host, p_port = prefill_raw.split(":")
        d_host, d_port = decode_raw.split(":")
        prefill_pair = (p_host.strip(), int(p_port.strip()))
        decode_pair = (d_host.strip(), int(d_port.strip()))
        return (prefill_pair, decode_pair)
    except (ValueError, AttributeError) as e:
        logger.error(
            "Failed to parse node overrides from headers (%s, %s): %s",
            prefill_raw,
            decode_raw,
            e,
        )
        raise HTTPException(
            status_code=400,
            detail="Invalid node override headers: expected <ip>:<port> format.",
        ) from e


async def _handle_completions(api: str, request: Request):
    """Proxy one completion request: prefill on one node, decode/stream on another.

    ``api`` is the upstream endpoint path ("/v1/completions" or
    "/v1/chat/completions"); ``request`` is the incoming FastAPI request.
    Returns a StreamingResponse relaying the decoder's output.

    Flow (as implemented below):
      1. Score the request and pick a prefiller (header override wins).
      2. Send the request to the prefiller; propagate its
         ``kv_transfer_params`` into the decoder request.
      3. Pick a decoder (header override wins) and stream its response,
         releasing scheduler bookkeeping along the way.

    Raises:
        HTTPException: 400 when a header-specified node is not registered.
        Exception: any other failure is logged with a traceback and re-raised.
    """
    try:
        req_data = await request.json()
        req_body = await request.body()
        # Raw body size in bytes drives the prefill/decode scoring heuristics.
        request_length = len(req_body)
        is_stream = req_data.get("stream", False)
        media_type = "application/json" if not is_stream else "text/event-stream"
        prefiller_score = proxy_state.calculate_prefill_scores(request_length)
        logger.debug(
            "Request length: %s, Prefiller score: %s", request_length, prefiller_score)
        request_id = await proxy_state.next_req_id()
        # Prefer the nodes specified via override headers, when provided.
        node_overrides = extract_node_overrides(request)
        prefiller_idx: Optional[str] = None

        if node_overrides is not None:
            (prefill_host, prefill_port), _ = node_overrides
            try:
                prefiller_idx = proxy_state.select_prefiller_by_address(
                    prefill_host, prefill_port, prefiller_score)
                logger.info(
                    "Using specified prefiller %s:%d for request %s",
                    prefill_host,
                    prefill_port,
                    request_id,
                )
            except RuntimeError as e:
                # The requested node is unknown to the proxy: reject the
                # request instead of silently falling back to the scheduler.
                logger.error(
                    "Specified prefiller %s:%d unavailable: %s",
                    prefill_host,
                    prefill_port,
                    e,
                )
                raise HTTPException(
                    status_code=400,
                    detail=(
                        "Requested prefiller node is not registered with the proxy. "
                        f"Check {HEADER_PREFILL_POD_ADDRESS_PORT} header."
                    ),
                ) from e

        # No override: let the scheduler pick a prefiller by score.
        if prefiller_idx is None:
            prefiller_idx = proxy_state.select_prefiller(prefiller_score)
        prefiller = proxy_state.prefillers[prefiller_idx]
        # Send request to prefiller
        service_request = ServiceRequest(
            client=prefiller.client,
            prefiller_id=prefiller_idx,
            endpoint=api,
            req_data=req_data,
            request_id=request_id,
            max_retries=global_args.max_retries,
            base_delay=global_args.retry_delay
        )
        response = await send_request_to_service(service_request)
        proxy_state.release_prefiller(prefiller_idx, prefiller_score)
        response_json = response.json()
        # Propagate KV-cache transfer metadata from prefiller to decoder.
        kv_transfer_params = response_json.get('kv_transfer_params', {})
        if kv_transfer_params:
            req_data["kv_transfer_params"] = kv_transfer_params
        # Select decoder
        decoder_score = proxy_state.calculate_decode_scores(request_length)
        logger.debug("Decoder score: %f", decoder_score)
        decoder_idx: Optional[str] = None

        if node_overrides is not None:
            try:
                _, (decode_host, decode_port) = node_overrides
                decoder_idx = proxy_state.select_decoder_by_address(
                    decode_host, decode_port, decoder_score)
                logger.info(
                    "Using specified decoder %s:%d for request %s",
                    decode_host,
                    decode_port,
                    request_id,
                )
            except RuntimeError as e:
                logger.error(
                    "Specified decoder %s:%d unavailable: %s",
                    decode_host,
                    decode_port,
                    e,
                )
                raise HTTPException(
                    status_code=400,
                    detail=(
                        "Requested decoder node is not registered with the proxy. "
                        f"Check {HEADER_DECODE_POD_ADDRESS_PORT} header."
                    ),
                ) from e

        # Use the prefiller's kv_transfer_params to select decoder
        if decoder_idx is None:
            decoder_idx = proxy_state.select_decoder(decoder_score)
        decoder = proxy_state.decoders[decoder_idx]
        logger.debug("Using %s %s", prefiller.url, decoder.url)
        # Stream response from decoder
        # Tracks whether the prefiller's KV reservation has been released;
        # it is released as soon as the first non-empty decoder chunk arrives.
        released_kv = False

        async def generate_stream():
            nonlocal released_kv
            # Only one await per chunk, minimal logic in loop
            try:
                service_request = ServiceRequest(
                        client=decoder.client,
                        prefiller_id=prefiller_idx,  # decoder client, but the parameter name is kept for consistency
                        endpoint=api,
                        req_data=req_data,
                        request_id=request_id,
                        max_retries=global_args.max_retries,
                        base_delay=global_args.retry_delay
                    )
                async for chunk in stream_service_response_with_retry(service_request):
                    if not released_kv and chunk:
                        proxy_state.release_prefiller_kv(
                            prefiller_idx, prefiller_score)
                        released_kv = True
                    yield chunk
            except Exception as e:
                # Streaming failed mid-flight: mark the request aborted on the
                # prefiller side and release its KV reservation.
                error_msg = (
                    "Error during streaming from decoder %s: %s "
                    "the aborted request %s will be routing to the target prefiller "
                    "when new request is ready to dispatch to it"
                )
                logger.error(error_msg, decoder.url, str(e), request_id)
                proxy_state.abort_prefiller_request(prefiller_idx, request_id)
                proxy_state.release_prefiller_kv(prefiller_idx,
                                                 prefiller_score)

            # After streaming done, release tokens
            proxy_state.release_decoder(decoder_idx, decoder_score)

        headers = {}
        if is_stream:
            # SSE-friendly headers; X-Accel-Buffering disables response
            # buffering in intermediate proxies (e.g. nginx) so chunks
            # reach the client immediately.
            headers["Cache-Control"] = "no-cache"
            headers["Connection"] = "keep-alive"
            headers["X-Accel-Buffering"] = "no"
        return StreamingResponse(generate_stream(),
                                 media_type=media_type, headers=headers)
    except Exception as e:
        import traceback
        exc_info = sys.exc_info()
        logger.error("Error occurred in disagg prefill proxy server"
              " - %s endpoint", api)
        logger.error(e)
        logger.error("".join(traceback.format_exception(*exc_info)))
        raise


@app.post("/v1/completions")
@with_cancellation
async def handle_completions(request: Request):
    return await _handle_completions("/v1/completions", request)


@app.post("/v1/chat/completions")
@with_cancellation
async def handle_chat_completions(request: Request):
    return await _handle_completions("/v1/chat/completions", request)


@app.get("/healthcheck")
async def healthcheck():
    return {
        "status": "ok",
        "prefill_instances": len(proxy_state.prefillers),
        "decode_instances": len(proxy_state.decoders)
    }


@app.get("/metrics")
async def handle_metrics(request: Request):
    """Forward metrics request to the next prefiller that will be used for /v1/chat/completions."""
    try:
        # Get the next prefiller index that will be used for /v1/chat/completions
        next_prefiller_idx = proxy_state.determine_next_prefiller()
        
        logger.debug("Forwarding metrics request to prefiller %s", next_prefiller_idx)
        
        # Forward the request to the selected prefiller and return the streaming response
        return await forward_metrics_request(
            next_prefiller_idx,
            request,
            max_retries=global_args.max_retries,
            base_delay=global_args.retry_delay
        )
        
    except Exception as e:
        import traceback
        exc_info = sys.exc_info()
        logger.error("Error occurred in metrics proxy server")
        logger.error(e)
        logger.error("".join(traceback.format_exception(*exc_info)))
        raise


if __name__ == '__main__':
    # Assignment at module scope is already global, so the handlers that read
    # `global_args` see this value; a `global` statement is a no-op here.
    global_args = parse_args()
    # Imported lazily so importing this module (e.g. in tests) does not
    # require uvicorn to be installed.
    import uvicorn
    uvicorn.run(app, host=global_args.host, port=global_args.port)