from fastapi.responses import StreamingResponse
from fastapi import HTTPException, status
import logging
import json

from motor.coordinator.models.request import ReqState
from motor.coordinator.router.base_router import BaseRouter, ScheduledResource
from motor.config.coordinator import CoordinatorConfig
from motor.resources.instance import PDRole

# Module-scoped logger, following the standard per-module logging convention.
logger = logging.getLogger(__name__)

class SeparatePDRouter(BaseRouter):
    """Route a request through separate Prefill (P) and Decode (D) instances.

    The prefill stage is forwarded as a single non-streaming request whose
    only purpose is to materialize the KV cache; the ``kv_transfer_params``
    it returns are handed to the decode stage, which streams tokens back to
    the client.  If a decode chunk carries ``stop_reason == "recomputed"``,
    the whole P->D cycle is retried with the tokens generated so far
    appended to the prompt and ``max_tokens`` reduced accordingly.
    """

    def __init__(self, req_info):
        super().__init__(req_info)
        self.retry = True                # Re-run the P->D cycle when recomputing
        self.retry_count = 0             # Number of recomputations performed so far
        self.total_generated_token = ""  # All tokens generated across recompute rounds

    async def handle_request(self) -> StreamingResponse:
        """Handle a request with separate P and D instances.

        Returns:
            StreamingResponse wrapping an async generator that drives the
            prefill/decode cycle, including any recompute retries.
        """

        async def generate_stream():
            while self.retry:
                self.first_chunk_sent = False
                self.retry = False

                prefill_resource: ScheduledResource = None
                try:
                    # Schedule P instance
                    prefill_instance, prefill_endpoint = self.prepare_resource(PDRole.ROLE_P)
                    prefill_resource = ScheduledResource(instance=prefill_instance, endpoint=prefill_endpoint)
                    # Forward P request
                    p_resp_json = await self.__forward_p_request(prefill_resource)
                    logger.debug("Prefill response received: %s", p_resp_json)
                except Exception as e:
                    logger.error("Error occurred while forwarding P request: %s", e)
                    # Bare raise keeps the original traceback untouched
                    # (``raise e`` would append a new frame to it).
                    raise
                finally:
                    # When forwarding is successful, state==PREFILL_END only releases p tokens
                    # When forwarding fails, state==EXCEPTION releases p tokens and kvcache
                    self._handle_request_event(prefill_resource)

                decode_resource: ScheduledResource = None
                try:
                    # Schedule D instance
                    decode_instance, decode_endpoint = self.prepare_resource(PDRole.ROLE_D)
                    decode_resource = ScheduledResource(instance=decode_instance, endpoint=decode_endpoint)
                    # Forward D request, relaying streamed chunks to the client
                    async for chunk in self.__forward_d_request(p_resp_json, prefill_resource, decode_resource):
                        yield chunk
                except Exception as e:
                    logger.error("Error occurred while forwarding Decode request: %s", e)
                    raise
                finally:
                    # After streaming done or error occurred, release tokens
                    self._handle_request_event(decode_resource)

        return StreamingResponse(generate_stream(),
                                 media_type="application/json")

    def __gen_p_request(self) -> dict:
        """Build the request body for the prefill (P) stage.

        Forces a non-streaming, single-token request so that only the KV
        cache is produced; the actual generation happens on the D instance.
        """
        req_data = self.req_info.req_data.copy()
        req_data['kv_transfer_params'] = {
            "do_remote_decode": True,
            "do_remote_prefill": False,
            "remote_engine_id": None,
            "remote_block_ids": None,
            "remote_host": None,
            "remote_port": None,
            "aborted_request": [],
        }
        req_data["stream"] = False
        req_data["max_tokens"] = 1
        req_data["min_tokens"] = 1
        if "stream_options" in req_data:
            # stream_options is only valid together with stream=True.
            del req_data["stream_options"]
        return req_data

    async def __forward_p_request(self, resource: ScheduledResource) -> dict:
        """Forward the prefill request to *resource* and return its JSON body.

        Raises:
            HTTPException: when the prefill instance produced no response at
                all (previously this surfaced as an UnboundLocalError on the
                return statement).
        """
        req_data = self.__gen_p_request()
        resp_json = None
        # Prefill is non-streaming, so this iterator is expected to yield a
        # single response object.
        async for response in self.forward_request(req_data=req_data, resource=resource):
            logger.debug(f"Prefill response: {response}")
            resp_json = response.json()
            self.req_info.update_state(ReqState.PREFILL_END)
        if resp_json is None:
            # Fail explicitly instead of letting an unbound local leak out.
            raise HTTPException(status.HTTP_502_BAD_GATEWAY,
                                "No response received from prefill instance")
        return resp_json

    def __gen_d_request(self, resp_json: dict) -> dict:
        """Build the decode (D) request body.

        Attaches the ``kv_transfer_params`` returned by the prefill stage so
        the D instance can pull the KV cache from the P instance.
        NOTE(review): ``.copy()`` is shallow — nested values such as
        ``messages`` remain shared with ``req_info.req_data``.
        """
        req_data = self.req_info.req_data.copy()
        kv_transfer_params = resp_json.get('kv_transfer_params', {})
        if kv_transfer_params:
            req_data["kv_transfer_params"] = kv_transfer_params
        return req_data

    async def __forward_d_request(self, resp_json: dict,
                                 prefill_resource: ScheduledResource,
                                 decode_resource: ScheduledResource):
        """Forward the decode request and yield its streamed chunks.

        Each chunk is parsed to detect the ``"recomputed"`` stop reason; when
        it appears, retry state is prepared (prompt extended with the tokens
        generated so far, ``max_tokens`` reduced) and the generator returns
        early so handle_request() can run another P->D cycle.
        """
        # Only one await per chunk, minimal logic in loop
        try:
            req_data = self.__gen_d_request(resp_json)

            stream_flag = bool(req_data.get("stream", False))
            chat_flag = "messages" in req_data
            if "prompt" in req_data:
                origin_prompt = req_data["prompt"]
            elif chat_flag:
                # NOTE(review): only the first message's content is treated as
                # the prompt — assumes single-turn requests; confirm upstream.
                messages = req_data["messages"]
                origin_prompt = messages[0].get("content", "")
            else:
                origin_prompt = ""
            # refer to vLLM sampling_params: max_token default value
            origin_max_tokens = req_data.get("max_tokens", 16)

            generated_token = ""
            completion_tokens = 0

            release_kv = False
            async for chunk in self.forward_stream_request(req_data=req_data, resource=decode_resource):
                if not release_kv and chunk:
                    # First real chunk: the D instance has pulled the KV cache,
                    # so the P-side resources can be released.
                    release_kv = True
                    self._handle_request_event(prefill_resource)
                # Parse response to determine if recomputation is needed
                try:
                    chunk_str = chunk.decode("utf-8").strip()
                except UnicodeDecodeError:
                    logger.debug(f"Skipping chunk: {chunk}")
                    yield chunk
                    continue
                if not chunk_str:
                    continue
                if chunk_str.startswith("data: "):
                    # Strip the SSE framing prefix before JSON parsing.
                    chunk_str = chunk_str[len("data: "):]
                try:
                    chunk_json = json.loads(chunk_str)
                    logger.info(f"Received chunk_json: {chunk_json}")
                except json.JSONDecodeError:
                    # if chunk is [done], skip it.
                    logger.debug(f"Skipping chunk: {chunk_str}")
                    yield chunk
                    continue
                choices = chunk_json.get("choices", [])
                if not choices:
                    yield chunk
                    continue

                # Get incremental token
                choice = choices[0]
                delta = choice.get("delta") or {}       # Streaming
                message = choice.get("message") or {}   # Non-streaming
                # When streaming triggers recompute, 'content' is empty
                content = (
                        delta.get("content")
                        or message.get("content")
                        or choice.get("text")
                        or ""
                        )
                generated_token += content

                stop_reason = choice.get("stop_reason")
                usage = chunk_json.get("usage", {})     # Tokens consumed in non-streaming
                # Streaming is always 1 token per time
                completion_tokens = (completion_tokens + 1) if stream_flag else \
                    (completion_tokens + usage.get("completion_tokens", 0))
                # Check recomputation keyword 'recomputed'
                if stop_reason == "recomputed":
                    if self.retry_count >= CoordinatorConfig().exception_config.max_retry:
                        raise HTTPException(status.HTTP_507_INSUFFICIENT_STORAGE, "Insufficient compute resource")

                    self.retry = True
                    self.req_info.update_state(ReqState.RECOMPUTE)
                    self.total_generated_token += generated_token
                    self._handle_request_event(prefill_resource)
                    self._handle_request_event(decode_resource)

                    self.retry_count += 1
                    new_prompt = origin_prompt + generated_token
                    if chat_flag:
                        messages[0]["content"] = new_prompt
                    else:
                        req_data["prompt"] = new_prompt
                    # When streaming recomputes, add one more completion_tokens
                    req_data["max_tokens"] = origin_max_tokens - completion_tokens + (1 if stream_flag else 0)
                    self.req_info.req_len = len(json.dumps(req_data).encode("utf-8"))
                    self.req_info.req_data = req_data
                    logger.info(f"Recomputing request {self.req_info.req_id}, retry count: {self.retry_count}, new req_info: {self.req_info}")
                    return
                if self.retry_count > 0 and not stream_flag:
                    self.total_generated_token += generated_token
                    # When non-streaming and recomputation has been done, return all accumulated responses at once
                    if chat_flag:
                        choice["message"]["content"] = self.total_generated_token
                    else:
                        choice["text"] = self.total_generated_token
                    chunk = json.dumps(chunk_json).encode("utf-8")
                yield chunk
            self.req_info.update_state(ReqState.DECODE_END)
            self._handle_request_event(decode_resource)
            logger.info(f"Completed streaming for request {self.req_info}")
        except Exception as e:
            # If no chunk was ever sent, the prefill resource was never
            # released in the loop above — release it here.
            if not self.first_chunk_sent:
                self._handle_request_event(prefill_resource)
            logger.error(
                f"Error during streaming from decoder {self.req_info.api}: {str(e)} the aborted request {self.req_info.req_id} "
            )
            raise