import asyncio
import functools
import logging
from abc import ABC, abstractmethod
from typing import Callable, Optional

import httpx
from fastapi import status, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field

from motor.config.coordinator import CoordinatorConfig
from motor.coordinator.models.request import RequestInfo, ReqState
from motor.coordinator.core.instance_healthchecker import InstanceHealthChecker
from motor.coordinator.scheduler.scheduler import Scheduler
from motor.resources.endpoint import Endpoint, WorkloadAction
from motor.resources.instance import PDRole
from motor.resources.instance import Instance

# Module-level logger following the one-logger-per-module convention.
logger = logging.getLogger(__name__)
# NOTE(review): calling basicConfig at import time configures the process-wide
# root logger as a side effect of importing this module; consider moving this
# to the application entry point — confirm nothing relies on it here.
logging.basicConfig(level=logging.INFO)

class ScheduledResource(BaseModel):
    """Represents a scheduled resource pairing an instance with one of its endpoints.

    Both fields default to None so an "empty" resource can exist; consumers
    (e.g. BaseRouter._handle_request_event) explicitly check for missing
    instance/endpoint before acting on the resource.
    """
    # Optional[...] matches the None defaults; the previous bare
    # `Instance = None` annotation is rejected by strict type checkers and by
    # pydantic v2 when default validation is enabled.
    instance: Optional[Instance] = None
    endpoint: Optional[Endpoint] = None

def handle_request_errors(stream=True):
    """Decorator that wraps request-forwarding coroutines with retry logic.

    The wrapped callable is re-invoked up to
    ``CoordinatorConfig().exception_config.max_retry`` times with exponential
    backoff. 4XX responses and connection-level errors abort immediately
    (no retry); other failures are retried unless a response chunk has already
    been streamed to the client.

    Args:
        stream: Whether the wrapped function is an async generator (True) or a
            coroutine returning a single response (False). The wrapper itself
            is always an async generator; a non-stream result is yielded as a
            single item.

    Returns:
        Decorator function that wraps the target function with error handling.
    """
    def decorator(func: Callable):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            async def _invoke():
                # Drive the wrapped callable once: yield each chunk for a
                # streaming target, or the single awaited result otherwise.
                if stream:
                    async for chunk in func(*args, **kwargs):
                        yield chunk
                else:
                    yield await func(*args, **kwargs)

            # Without a bound instance plus valid resource/req_data kwargs we
            # cannot report state or retry meaningfully: pass the call through
            # once with no error handling. (The redundant `len(args) == 0`
            # check was dropped — `not args` already covers it.)
            if (not args
                or 'resource' not in kwargs or not isinstance(kwargs['resource'], ScheduledResource)
                or 'req_data' not in kwargs or not isinstance(kwargs['req_data'], dict)):
                async for item in _invoke():
                    yield item
                return

            last_exc = None
            self = args[0]
            resource: ScheduledResource = kwargs['resource']
            req_data = kwargs['req_data']
            retry_delay = 0.2  # base backoff in seconds, doubled per attempt

            endpoint = resource.endpoint
            logger.debug(f"Forwarding request to instance at {endpoint.ip}:{endpoint.port} with data: {req_data}")
            # Hoisted: max_retry is loop-invariant for this request.
            max_retry = CoordinatorConfig().exception_config.max_retry
            for attempt in range(max_retry):
                try:
                    async for item in _invoke():
                        yield item
                    return
                except httpx.HTTPStatusError as e:
                    # Try to get http status code
                    if hasattr(e, 'response') and hasattr(e.response, 'status_code'):
                        status_code = e.response.status_code
                        logger.warning(f"HTTP error, status code: {status_code}")
                        # 4XX: Client request error, return directly
                        if status.HTTP_400_BAD_REQUEST <= status_code < status.HTTP_500_INTERNAL_SERVER_ERROR:
                            self.req_info.update_state(ReqState.INVALID)
                            logger.warning("User error")
                            raise
                    else:
                        # Could not determine a status code; treat like a
                        # server-side failure and fall through to the retry.
                        logger.warning(f"HTTP error, but fail to parse status code: {e}")
                    last_exc = e

                except httpx.RequestError as e:
                    # Handle all request errors including ProxyError, ConnectError, etc.
                    # These indicate the endpoint is unreachable, so they are
                    # raised immediately rather than retried.
                    logger.warning(f"Request error: {e}")
                    # TODO: InstanceHealthChecker singleton
                    # InstanceHealthChecker("controller client").push_exception_instance(resource.instance, resource.endpoint)

                    self.req_info.update_state(ReqState.EXCEPTION)
                    # TODO: specific error type
                    if isinstance(e, httpx.ProxyError):
                        logger.warning("Proxy error")
                    elif isinstance(e, httpx.ConnectError):
                        logger.warning("Connect error")
                    elif isinstance(e, httpx.TimeoutException):
                        logger.warning("Timeout error")
                        self.req_info.update_state(ReqState.TIMEOUT)
                    else:
                        logger.warning(f"Unknown request error: {str(e)}")
                    raise
                except Exception as e:
                    logger.warning(
                        f"Failed for forwarding /{self.req_info.api}, error: {str(e)}"
                    )
                    # If any chunk has been sent, do not retry, just log and drop
                    if self.first_chunk_sent:
                        logger.error(
                            f"Streaming to client interrupted after response started: {str(e)}"
                        )
                        self.req_info.update_state(ReqState.EXCEPTION)
                        raise
                    last_exc = e

                if last_exc and attempt == max_retry - 1:
                    logger.error(f"Stream request forwarding failed, reach max retries {max_retry}")
                    self.req_info.update_state(ReqState.EXCEPTION)
                    raise last_exc

                logger.warning(f"Attempt failed for request {endpoint.ip}, retrying {attempt + 1}/{max_retry}")
                # BUGFIX: 2**(attempt - 1) produced 2**-1 on the first retry,
                # halving the configured base delay; start the series at 2**0.
                await asyncio.sleep(retry_delay * (2 ** attempt))
        return wrapper
    return decorator

class BaseRouter(ABC):
    """Base router class for handling requests with different instance configurations.

    Subclasses implement handle_request(); the helpers here schedule an
    instance/endpoint via the Scheduler, forward the request over HTTP with
    httpx, and report request lifecycle transitions through self.req_info and
    _handle_request_event().
    """
    
    def __init__(self, req_info: RequestInfo):
        """Initialize the base router with request information
        
        Args:
            req_info: Request information object containing request details
        """
        self.req_info = req_info
        # Flipped to True by forward_stream_request once the first non-empty
        # chunk is produced; handle_request_errors checks it so a request is
        # never retried after data has already reached the client.
        self.first_chunk_sent = False
    
    @abstractmethod
    async def handle_request(self) -> StreamingResponse:
        """Handle the request based on specific implementation
        
        Returns:
            StreamingResponse: The response stream for the request
        """
        pass
    
    # @timeout_handler(CoordinatorConfig().exception_config.schedule_timeout)
    def prepare_resource(self, role: PDRole):
        """Prepare resource for the given role by scheduling an instance
        
        Args:
            role: The role (PDRole) to prepare resource for
            
        Returns:
            tuple: A tuple containing the scheduled instance and endpoint
            
        Raises:
            HTTPException: 503 if scheduling fails after maximum retry
                attempts; 500 if the allocation event cannot be pushed
        """
        self.req_info.update_state(ReqState.P_SCHEDULING if role == PDRole.ROLE_P else ReqState.D_SCHEDULING)

        for i in range(CoordinatorConfig().exception_config.max_retry):
            result = Scheduler().select_instance_and_endpoint(role)
            logger.debug(f"Scheduling attempt {i + 1} for role {role}, got result: {result}")
            # Check return value, ensure it's iterable with two elements
            if result and isinstance(result, (tuple, list)) and len(result) == 2 and all(result):
                ins, endpoint = result
                break
            logger.warning(f"Scheduling failed, role:{role}, retrying {i + 1}/{CoordinatorConfig().exception_config.max_retry}")
            if i == CoordinatorConfig().exception_config.max_retry - 1:
                self.req_info.update_state(ReqState.EXCEPTION)
                raise HTTPException(
                    status_code=status.HTTP_503_SERVICE_UNAVAILABLE, 
                    detail=f"Scheduling failed, role:{role}"
                )
        # NOTE(review): ins/endpoint are bound only via the break above. The
        # final failed iteration raises, so reaching here implies success —
        # unless max_retry is 0, in which case the loop never runs and this
        # raises NameError. Confirm max_retry >= 1 is enforced by config.
        logger.info(f"Scheduled instance: {ins.job_name}, role: {role}")

        # If scheduler returns normally, it means allocation was successful
        self.req_info.update_state(ReqState.P_ALLOCATED if role == PDRole.ROLE_P else ReqState.D_ALLOCATED)
        if not self._handle_request_event(ScheduledResource(instance=ins, endpoint=endpoint)):
            self.req_info.update_state(ReqState.EXCEPTION)
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, 
                detail=f"Allocation failed, role:{role}"
            )
        logger.info(f"Allocated instance: {ins.job_name}, role: {role}")
        return ins, endpoint
    
    @handle_request_errors(stream=True)
    async def forward_stream_request(self, req_data: dict, resource: ScheduledResource):
        """Forward streaming request to the given endpoint
        
        Args:
            req_data: The request data to forward
            resource: The scheduled resource containing the endpoint
            
        Yields:
            Bytes of the response stream
        """
        logger.debug(f"Decode request: {req_data}")
        endpoint = resource.endpoint
        headers = {
            'Content-Type': 'application/json',
            'X-Request-Id': self.req_info.req_id
        }
        
        # The client's own "stream" flag (from the original request payload)
        # decides which timeout budget applies below.
        stream_flag = bool(self.req_info.req_data.get("stream", False))
        
        # Streaming responses are bounded by the first-token timeout; a single
        # (non-stream) response gets the full inference timeout instead.
        async with httpx.AsyncClient(timeout= CoordinatorConfig().exception_config.first_token_timeout if stream_flag 
                                     else CoordinatorConfig().exception_config.infer_timeout,
                                    base_url=f"http://{endpoint.ip}:{endpoint.port}",
                                    verify=False) as client:
            async with client.stream("POST",
                                        f"/{self.req_info.api}",
                                        json=req_data,
                                        headers=headers) as response:
                response.raise_for_status()
                # Reset per attempt so a retried request is retryable again
                # until its first chunk actually goes out.
                self.first_chunk_sent = False
                async for chunk in response.aiter_bytes():
                    # First non-empty chunk marks time-to-first-token and makes
                    # the request non-retryable (see handle_request_errors).
                    if not self.first_chunk_sent and chunk:
                        self.first_chunk_sent = True
                        self.req_info.update_state(ReqState.FIRST_TOKEN_FINISH)
                    yield chunk
                return

    @handle_request_errors(stream=False)
    async def forward_request(self, req_data: dict, resource: ScheduledResource):
        """Forward non-streaming request to the given resource
        
        Args:
            req_data: The request data to forward
            resource: The scheduled resource containing the endpoint
            
        Returns:
            The response from the endpoint
        """
        endpoint = resource.endpoint
        headers = {
            'Content-Type': 'application/json',
            'X-Request-Id': self.req_info.req_id
        }
        # NOTE(review): timeout=None disables all httpx timeouts here, unlike
        # forward_stream_request which uses the configured budgets — confirm
        # an unbounded wait is intentional for non-stream forwarding.
        async with httpx.AsyncClient(timeout=None,
                                    base_url=f"http://{endpoint.ip}:{endpoint.port}",
                                    verify=False) as client:

            response = await client.post(f"/{self.req_info.api}",
                                            json=req_data,
                                            headers=headers)
            response.raise_for_status()
            return response

    def _handle_request_event(self, resource: ScheduledResource):
        """Push a workload event to the Scheduler based on the current request state
        
        Maps the request state to allocation/release workload actions on the
        scheduled instance/endpoint (not only releases: ALLOCATED states
        reserve capacity).
        
        Args:
            resource: The scheduled resource the event applies to
            
        Returns:
            The result of the workload update; False for an empty resource or
            a state with no mapped action
        """
        
        logger.info(f"Pushing event req_state={self.req_info.state}")
        
        if not(resource and isinstance(resource, ScheduledResource) and resource.instance and resource.endpoint):
            logger.warning("Resource is empty")
            return False
        
        instance = resource.instance
        endpoint = resource.endpoint
        req_state = self.req_info.state
        req_id = self.req_info.req_id
        req_len = self.req_info.req_len
        
        result = False
        if req_state == ReqState.P_ALLOCATED or req_state == ReqState.D_ALLOCATED:
            # Newly allocated request: reserve workload capacity.
            result = Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.ALLOCATION, req_len) 
        elif req_state == ReqState.PREFILL_END:
            result = Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.RELEASE_TOKENS, req_len) 
        elif req_state == ReqState.DECODE_END:
            # Finished decoding: free both KV cache and token budget.
            result = Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.RELEASE_KV, req_len) 
            result &= Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.RELEASE_TOKENS, req_len) 
        elif req_state == ReqState.FIRST_TOKEN_FINISH:
            result = Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.RELEASE_KV, req_len) 
        elif req_state == ReqState.RECOMPUTE or req_state == ReqState.EXCEPTION:
            # Aborted/recomputed request: free both KV cache and token budget.
            result = Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.RELEASE_KV, req_len) 
            result &= Scheduler().update_workload(instance, endpoint, req_id, WorkloadAction.RELEASE_TOKENS, req_len) 
        return result
