import time
from typing import List, Union, Optional, AsyncGenerator

import asyncio
from tqdm import tqdm
import argparse

from mixserve.config import (
    ModelConfig,
    ParallelConfig,
    CacheConfig,
    DisaggParallelConfig,
    ContextStageSchedConfig,
    DecodingStageSchedConfig
)

from mixserve.single_stage_engine import StepOutput
from mixserve.engine import LLMEngine
from mixserve.logger import init_logger
from mixserve.request import  SamplingParams,create_request
from mixserve.driverengine import DriverEngine
from mixserve.utils import Prompt_struct,Counter
logger = init_logger(__name__)

class OfflineDriverLLM:
    """A wrapper of DriverEngine for offline synchronous inference.

    Construction only builds the underlying engine; callers must await
    ``async_init`` before issuing any requests.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        disagg_parallel_config: DisaggParallelConfig,
        cache_config: CacheConfig,
        context_sched_config: ContextStageSchedConfig,
        decoding_sched_config: DecodingStageSchedConfig,
        profile_first: bool,
        profiling: bool = False
    ):
        """Build the underlying DriverEngine.

        NOTE(review): ``profiling`` is accepted but never used here —
        confirm whether it should be forwarded to DriverEngine.
        """
        self.driver = DriverEngine(
            model_config=model_config,
            disagg_parallel_config=disagg_parallel_config,
            cache_config=cache_config,
            context_sched_config=context_sched_config,
            decoding_sched_config=decoding_sched_config,
            profile_first=profile_first
        )
        # Handle for the background decide-loop task started in async_init.
        self._decide_loop_task: Optional["asyncio.Task"] = None

    async def async_init(self):
        """Finish asynchronous engine setup and start the decide loop.

        The task reference is stored on the instance: the event loop keeps
        only a weak reference to tasks, so an unreferenced task created by
        ``asyncio.create_task`` may be garbage-collected before it finishes.
        """
        await self.driver.async_init()
        self._decide_loop_task = asyncio.create_task(
            self.driver.continuous_decide_loop()
        )

    async def generate(
        self,
        prompts: Optional[Union[List[str], str]] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        sampling_params: Optional[Union[SamplingParams, List[SamplingParams]]] = None,
        TTFT_SLO: Optional[float] = None,
        TPOT_SLO: Optional[float] = None,
        use_tqdm: bool = True
    ) -> List[List[StepOutput]]:
        """Run synchronous batch generation by delegating to the driver.

        Returns one list of StepOutput per input prompt, as produced by
        ``DriverEngine.generate``.
        """
        return await self.driver.generate(
            prompts, prompt_token_ids, sampling_params, TTFT_SLO, TPOT_SLO, use_tqdm
        )


    
class AsyncDriverLLM:
    """A wrapper of DriverEngine for online async inference.

    Unlike OfflineDriverLLM, the engine is fully initialized in the
    constructor (via ``asyncio.run``), so this class must be instantiated
    outside of a running event loop.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        disagg_parallel_config: DisaggParallelConfig,
        cache_config: CacheConfig,
        context_sched_config: ContextStageSchedConfig,
        decoding_sched_config: DecodingStageSchedConfig
    ):
        self.driver = DriverEngine(
            model_config=model_config,
            disagg_parallel_config=disagg_parallel_config,
            cache_config=cache_config,
            context_sched_config=context_sched_config,
            decoding_sched_config=decoding_sched_config,
            profile_first=False
        )
        # Monotonic counter used to assign internal request ids.
        self.request_counter = Counter()
        # NOTE: asyncio.run raises RuntimeError if called from within a
        # running event loop; construction must happen before the loop starts.
        asyncio.run(self.driver.async_init())

    @staticmethod
    def from_engine_args(
        args: argparse.Namespace
    ) -> "AsyncDriverLLM":
        """Build an AsyncDriverLLM from parsed command-line arguments.

        Decorated as a staticmethod so it can be called both as
        ``AsyncDriverLLM.from_engine_args(args)`` and on an instance
        without misbinding ``self``.
        """
        return AsyncDriverLLM(
            model_config=ModelConfig(
                model=args.model,
                tokenizer=args.tokenizer,
                trust_remote_code=args.trust_remote_code,
                seed=args.seed,
                use_dummy_weights=args.use_dummy_weights
            ),
            disagg_parallel_config=DisaggParallelConfig(
                context=ParallelConfig(
                    tensor_parallel_size=args.context_tensor_parallel_size,
                    pipeline_parallel_size=args.context_pipeline_parallel_size
                ),
                decoding=ParallelConfig(
                    tensor_parallel_size=args.decoding_tensor_parallel_size,
                    pipeline_parallel_size=args.decoding_pipeline_parallel_size
                )
            ),
            cache_config=CacheConfig(
                block_size=args.block_size,
                max_num_blocks_per_req=args.max_num_blocks_per_req,
                gpu_memory_utilization=args.gpu_memory_utilization,
                cpu_swap_space=args.swap_space
            ),
            context_sched_config=ContextStageSchedConfig(
                policy=args.context_sched_policy,
                max_batch_size=args.context_max_batch_size,
                max_tokens_per_batch=args.context_max_tokens_per_batch
            ),
            decoding_sched_config=DecodingStageSchedConfig(
                policy=args.decoding_sched_policy,
                max_batch_size=args.decoding_max_batch_size,
                max_tokens_per_batch=args.decoding_max_tokens_per_batch,
                model_name=args.model,
                waiting_block_prop_threshold=0.05
            )
        )

    async def start_event_loop(self):
        """Start all engine event loops; runs until they exit."""
        await self.driver.engine.start_all_event_loops()

    def get_and_pop_request_lifetime_events(self, request_id: str):
        """Remove and return the lifetime events recorded for a request.

        Raises KeyError if no events exist for ``request_id``.
        """
        return self.driver.engine.request_lifetime_events.pop(request_id)

    async def generate(
        self,
        request_id: int,
        prompt: Optional[str] = None,
        prompt_token_ids: Optional[List[int]] = None,
        sampling_params: Optional[SamplingParams] = None,
        TTFT_SLO: Optional[float] = None,
        TPOT_SLO: Optional[float] = None
    ) -> AsyncGenerator[StepOutput, None]:
        """Submit one request and yield its StepOutputs as they are produced.

        Args:
            request_id: caller-supplied id forwarded to create_request.
            prompt / prompt_token_ids: raw text or pre-tokenized prompt.
            sampling_params: per-request sampling settings; a fresh default
                instance is created when omitted (avoids the shared-mutable-
                default pitfall of ``SamplingParams()`` in the signature).
            TTFT_SLO / TPOT_SLO: latency SLOs in seconds; fall back to
                0.4s TTFT and 0.04s TPOT when not given.
        """
        # Build a fresh SamplingParams per call rather than sharing a single
        # default instance across all requests.
        if sampling_params is None:
            sampling_params = SamplingParams()

        req = create_request(
            prompt,
            prompt_token_ids,
            sampling_params,
            self.request_counter,
            self.driver.engine.tokenizer,
            arrival_time=time.time(),
            request_id=request_id
        )

        # Register SLO bookkeeping for this request before scheduling.
        self.driver.prompts[req.request_id] = Prompt_struct(
            len(req.prompt_token_ids),
            output_length=sampling_params.max_tokens,
            TTFT_SLO=TTFT_SLO or 0.4,
            TPOT_SLO=TPOT_SLO or 0.04,
            current_length=0
        )

        waiting_queue_on_context_engine = self.driver.engine.context_engine.scheduler.waiting_queue
        # NOTE(review): the name says "running" but this reads the decoding
        # scheduler's *waiting* queue — confirm which queue TPOT prediction
        # should be based on.
        running_queue_on_decoding_engine = self.driver.engine.decoding_engine.scheduler.waiting_queue

        # Predict TTFT from the context-stage backlog, capped by chunk size.
        num_tokens = sum(len(r.prompt_token_ids) for r in waiting_queue_on_context_engine)
        budget = min(num_tokens, self.driver.engine.context_engine.sched_config.chunk_size)
        TTFT_on_prefill = self.driver.ttft_predictor.predict_TTFT(self.driver.prefill_instance, budget)

        # Predict TPOT from the requests currently on the decoding engine.
        # ``prompts.get`` may yield None for unknown ids; behavior preserved.
        requests = [
            self.driver.prompts.get(request.request_id)
            for request in running_queue_on_decoding_engine
        ]
        if requests:
            TPOT_on_engine = self.driver.tpot_predictor.predict_TPOT(self.driver.decode_instance, requests)
        else:
            # No load observed on the decoding engine: use a fixed estimate.
            TPOT_on_engine = 0.05

        SLO_information = self.driver.prompts[req.request_id]
        req.local_token = SLO_information.calculate_local_length(TTFT_on_prefill, TPOT_on_engine)

        async for step_output in self.driver.engine.generate(req):
            yield step_output