from typing import List,Optional,Tuple,Union
from dataclasses import dataclass
import argparse
import os
import asyncio
import glob
from importlib.resources import files


from mixserve.profiler import Profiler
from mixserve.request import SamplingParams
from mixserve.utils import  get_logical_to_physical_map,Counter,Prompt_struct
from mixserve.single_stage_engine import ContextStageLLMEngine,DecodingStageLLMEngine,StepOutput
from mixserve.engine import LLMEngine
from mixserve.predictor import TTFT_predictor,TPOT_predictor
from mixserve.request import create_request
from mixserve.config import (
    ModelConfig,
    DisaggParallelConfig,
    ParallelConfig,
    CacheConfig,
    ContextStageSchedConfig,
    DecodingStageSchedConfig
)

from mixserve.logger import init_logger

import traceback
'''
The driver engine integrates the profiler, the predictor, and the llm_engine.
It uses the predictor to compute the TTFT (Time To First Token) and TPOT (Time Per Output Token) metrics for the current instance in real time.
Based on these predictions, it decides when each request on the current context instance should be migrated.
'''
# Module-level logger, named after this module.
logger=init_logger(__name__)
def get_gpu_topology(tp_size:int,pp_size:int,offset:int)->list[list[int]]:
    """Return GPU ids grouped per pipeline stage.

    Produces ``pp_size`` rows of ``tp_size`` consecutive GPU ids,
    numbered starting at ``offset`` (row ``p`` holds ids
    ``offset + p*tp_size`` .. ``offset + (p+1)*tp_size - 1``).
    """
    return [
        [offset + stage * tp_size + lane for lane in range(tp_size)]
        for stage in range(pp_size)
    ]
            

class DriverEngine:
    """
    Drives one context (prefill) engine and one decoding engine.

    Integrates the profiler, the TTFT/TPOT predictors, and the underlying
    disaggregated LLMEngine: the predictors estimate TTFT (time to first
    token) and TPOT (time per output token) in real time, and those
    estimates determine each request's `local_token` budget, i.e. when the
    request should migrate from the context instance to the decoding
    instance.
    """

    def __init__(self, model_config:ModelConfig, disagg_parallel_config:DisaggParallelConfig, cache_config:CacheConfig, context_sched_config:ContextStageSchedConfig, decoding_sched_config:DecodingStageSchedConfig,profile_first:bool):
        """
        Store configuration only; GPU/async resources are created in
        `async_init()`, which must be awaited before use.

        Args:
            model_config: model description (incl. the `Moe` flag passed to
                the predictors).
            disagg_parallel_config: TP/PP sizes for the context and
                decoding stages.
            cache_config: KV-cache configuration.
            context_sched_config: scheduler config for the context stage.
            decoding_sched_config: scheduler config for the decoding stage.
            profile_first: when True, wipe previously collected profiling
                data and re-profile during `async_init()`.
        """
        self.model_config = model_config
        self.disagg_parallel_config = disagg_parallel_config
        self.cache_config = cache_config
        self.context_sched_config = context_sched_config
        self.decoding_sched_config = decoding_sched_config
        # Monotonic counter that hands out unique request ids.
        self.request_counter = Counter()
        # request_id -> Prompt_struct holding per-request SLO bookkeeping.
        self.prompts: dict[int, Prompt_struct] = {}
        self.profile_first = profile_first
        # BUG FIX: pre-set so shutdown() is safe even when async_init()
        # was never awaited (previously raised AttributeError).
        self._event_loop_task: Optional[asyncio.Task] = None
        self.profiler_initialized = False

    async def async_init(self):
        """
        Asynchronous part of construction: optionally re-profile, build the
        GPU topologies, train the predictors, then create the LLMEngine and
        start its event loops.
        """
        if self.profile_first:
            # Delete profiling data left over from previous runs.
            package_root = files('mixserve')
            db_path = os.path.join(package_root, 'benchdb')
            for stale in glob.glob(os.path.join(db_path, "*")):
                if os.path.isfile(stale):
                    os.remove(stale)
            # Profile from scratch.
            self._init_profilers()
            await self.profile_all()

        # GPU ids are assigned consecutively: context (prefill) instance
        # first, then the decoding instance.
        num_gpus_allocated = 0
        context_cfg = self.disagg_parallel_config.context
        self.prefill_instance = get_gpu_topology(
            context_cfg.tensor_parallel_size,
            context_cfg.pipeline_parallel_size,
            num_gpus_allocated,
        )
        num_gpus_allocated += context_cfg.tensor_parallel_size * context_cfg.pipeline_parallel_size
        decoding_cfg = self.disagg_parallel_config.decoding
        self.decode_instance = get_gpu_topology(
            decoding_cfg.tensor_parallel_size,
            decoding_cfg.pipeline_parallel_size,
            num_gpus_allocated,
        )
        self.ttft_predictor = TTFT_predictor(self.model_config.Moe)
        self.tpot_predictor = TPOT_predictor(self.model_config.Moe)
        await self.initialize_predictor()

        self.engine = LLMEngine(
            self.model_config,
            self.disagg_parallel_config,
            self.cache_config,
            self.context_sched_config,
            self.decoding_sched_config,
            False
        )
        await self.engine.initialize()
        self._event_loop_task = asyncio.create_task(self.engine.start_all_event_loops())

    def _init_profilers(self):
        """Create the Profiler bound to the current configuration."""
        self.profiler = Profiler(
            cache_config=self.cache_config,
            model_config=self.model_config,
            dis_para_config=self.disagg_parallel_config,
            profile_config=self.context_sched_config,
            decode_profile_config=self.decoding_sched_config
        )

    async def shutdown(self):
        """Cancel the engine event-loop task (if started) and await it."""
        if self._event_loop_task:
            self._event_loop_task.cancel()
            try:
                await self._event_loop_task
            except asyncio.CancelledError:
                pass  # Cancellation is the expected outcome here.

    async def profile_all(self):
        """Run the profiler to completion and mark profiling as done."""
        await self.profiler.profile()
        self.profiler_initialized = True

    async def initialize_predictor(self):
        """
        Train the TTFT predictor for every prefill GPU and the TPOT
        predictor for every decode GPU; all training tasks run concurrently.
        """
        tasks = []

        pp_size = len(self.prefill_instance)
        tp_size = len(self.prefill_instance[0])
        for pp_group in self.prefill_instance:
            for gpu_id in pp_group:
                logger.info(f"Training gpu {gpu_id} of prefill part")
                tasks.append(self.ttft_predictor.train_formula_attention(gpu_id, tp_size, pp_size))
                tasks.append(self.ttft_predictor.train_formula_gemm(gpu_id, tp_size, pp_size))
                tasks.append(self.ttft_predictor.train_formula_moe(gpu_id, tp_size, pp_size))

        pp_size = len(self.decode_instance)
        tp_size = len(self.decode_instance[0])
        for pp_group in self.decode_instance:
            for gpu_id in pp_group:
                logger.info(f"Training gpu {gpu_id} of decode part")
                tasks.append(self.tpot_predictor.train_formula_attention(gpu_id, tp_size, pp_size))
                tasks.append(self.tpot_predictor.train_formula_gemm(gpu_id, tp_size, pp_size))
                tasks.append(self.tpot_predictor.train_formula_moe(gpu_id, tp_size, pp_size))

        await asyncio.gather(*tasks)

    async def generate(
        self,
        prompts: Optional[Union[List[str], str]] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        sampling_params: Optional[Union[SamplingParams, List[SamplingParams]]] = None,
        TTFT_SLO:Optional[list[float]|float]=None,
        TPOT_SLO:Optional[list[float]|float]=None,
        use_tqdm: bool = True
    ) -> List[List[StepOutput]]:
        """
        Generate completions for the given prompts, one concurrent task per
        request.

        Args:
            prompts: a prompt string or list of prompt strings.
            prompt_token_ids: pre-tokenized prompts; must pair with
                `prompts` when both are given.
            sampling_params: one SamplingParams for all requests, or one
                per request.
            TTFT_SLO / TPOT_SLO: SLO targets, scalar or per-request;
                defaults of 0.4s / 0.04s are used when None.
            use_tqdm: accepted for interface compatibility (not used here).

        Returns:
            Per-request lists of StepOutput, in request order.

        Raises:
            ValueError: if neither prompts nor prompt_token_ids is given,
                or their lengths disagree.
        """
        if prompts is None and prompt_token_ids is None:
            raise ValueError("prompts or prompt_token_ids must be provided")
        if isinstance(prompts, str):
            # Convert a single prompt to a list.
            prompts = [prompts]
        if prompts is not None and prompt_token_ids is not None:
            if len(prompts) != len(prompt_token_ids):
                raise ValueError(
                    "The lengths of prompts and prompt_token_ids must be the same."
                )
        num_requests = len(prompts) if prompts is not None else len(prompt_token_ids)
        if sampling_params is None:
            sampling_params = [SamplingParams()] * num_requests
        elif isinstance(sampling_params, SamplingParams):
            sampling_params = [sampling_params] * num_requests
        else:
            assert (
                len(sampling_params) == num_requests
            ), f"prompts should pair with the list of sampling parameters, \
                 but got {num_requests} prompts and {len(sampling_params)} sampling parameters"

        def _resolve_slo(slo, idx: int, default: float) -> float:
            # Resolve a scalar-or-per-request SLO value.
            # BUG FIX: the original left the local undefined (NameError)
            # when the SLO was neither float nor list (e.g. an int).
            if isinstance(slo, float):
                return slo
            if isinstance(slo, list):
                return slo[idx]
            return default

        async def deal_with_request_coroutine(req_index: int) -> List[StepOutput]:
            prompt = prompts[req_index] if prompts is not None else None
            token_ids = None if prompt_token_ids is None else prompt_token_ids[req_index]
            step_outputs = []

            req = create_request(
                prompt,
                token_ids,
                sampling_params[req_index],
                self.request_counter,
                self.engine.tokenizer
            )
            self.prompts[req.request_id] = Prompt_struct(
                len(req.prompt_token_ids),
                output_length=sampling_params[req_index].max_tokens,
                TTFT_SLO=_resolve_slo(TTFT_SLO, req_index, 0.4),
                TPOT_SLO=_resolve_slo(TPOT_SLO, req_index, 0.04),
                current_length=0,
            )

            waiting_queue_on_context_engine = self.engine.context_engine.scheduler.waiting_queue
            # NOTE(review): named "running" but reads waiting_queue, while
            # decide_local_token() reads batch_queues — confirm which queue
            # is intended here.
            running_queue_on_decoding_engine = self.engine.decoding_engine.scheduler.waiting_queue

            num_tokens = sum(len(r.prompt_token_ids) for r in waiting_queue_on_context_engine)
            budget = min(num_tokens, self.engine.context_engine.sched_config.chunk_size)
            TTFT_on_prefill = self.ttft_predictor.predict_TTFT(self.prefill_instance, budget)

            # BUG FIX: the original loop reused the name `req`, so after the
            # loop `req` pointed at the *last queued decoding request*
            # instead of the freshly created one; use a distinct variable.
            requests = [self.prompts.get(queued.request_id)
                        for queued in running_queue_on_decoding_engine]
            if requests:
                # NOTE(review): a flat list is passed here, while
                # decide_local_token() passes per-batch lists — confirm the
                # predictor accepts both shapes.
                TPOT_on_engine = self.tpot_predictor.predict_TPOT(self.decode_instance, requests)
                # Clamp unrealistically small predictions.
                TPOT_on_engine = max(TPOT_on_engine, 0.02)
            else:
                # No decoding load yet: pessimistic placeholder.
                TPOT_on_engine = 1

            SLO_information = self.prompts[req.request_id]
            req.local_token = SLO_information.calculate_local_length(TTFT_on_prefill, TPOT_on_engine)
            async for step_output in self.engine.generate(req):
                step_outputs.append(step_output)
            return step_outputs

        request_tasks = [
            asyncio.create_task(deal_with_request_coroutine(i))
            for i in range(num_requests)
        ]
        return await asyncio.gather(*request_tasks)

    async def decide_local_token(self):
        """
        Recompute `local_token` for every request currently on the context
        engine, from the predicted prefill TTFT and decoding TPOT.

        Inelegant but efficient: reaches directly into scheduler queues.
        """
        running_queue_on_context_engine = self.engine.context_engine.scheduler.running_queue
        waiting_queue_on_context_engine = self.engine.context_engine.scheduler.waiting_queue
        running_queue_on_decoding_engine = self.engine.decoding_engine.scheduler.batch_queues

        num_tokens = sum(len(req.prompt_token_ids) for req in waiting_queue_on_context_engine)
        budget = min(num_tokens, self.engine.context_engine.sched_config.chunk_size)
        TTFT_on_prefill = self.ttft_predictor.predict_TTFT(self.prefill_instance, budget)

        # One bucket of Prompt_structs per decoding batch.
        decoding_requests = [[] for _ in range(len(running_queue_on_decoding_engine))]
        has_req_on_decode = False
        for i, batch in enumerate(running_queue_on_decoding_engine):
            for req in batch.requests:
                prompt_meta = self.prompts.get(req.request_id)
                if prompt_meta is None:
                    # Unknown to the driver (e.g. already cleaned up):
                    # skip instead of crashing the whole decide pass.
                    continue
                prompt_meta.current_length = len(req.generated_token_ids)
                decoding_requests[i].append(prompt_meta)
                has_req_on_decode = True
        if has_req_on_decode:
            TPOT_on_engine = self.tpot_predictor.predict_TPOT(self.decode_instance, decoding_requests)
        else:
            # No decoding load: pessimistic placeholder.
            TPOT_on_engine = 1

        for req in running_queue_on_context_engine + waiting_queue_on_context_engine:
            SLO_information = self.prompts.get(req.request_id)
            if SLO_information is None:
                continue  # No SLO bookkeeping for this request.
            req.local_token = SLO_information.calculate_local_length(TTFT_on_prefill, TPOT_on_engine)

    async def continuous_decide_loop(self):
        """Re-run decide_local_token() every 0.2s forever; failures are
        logged but do not stop the loop."""
        while True:
            try:
                await self.decide_local_token()
            except Exception as e:
                logger.warning(f"decide_local_token failed: {e}")
                traceback.print_exc()
            await asyncio.sleep(0.2)

def add_engine_cli_args(parser: argparse.ArgumentParser):
    """Register engine-related CLI arguments on *parser*.

    Groups: model loading, per-stage parallelism, KV-cache sizing,
    per-stage scheduling, and simulator/profiler options.
    """
    # Model loading.
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--tokenizer", type=str, default=None)
    parser.add_argument("--trust-remote-code", action="store_true")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--use-dummy-weights", action="store_true")

    # Parallelism: context = prefill stage, decoding = generation stage.
    parser.add_argument("--context-pipeline-parallel-size", type=int, default=1)
    parser.add_argument("--context-tensor-parallel-size", type=int, default=1)
    parser.add_argument("--decoding-pipeline-parallel-size", type=int, default=1)
    parser.add_argument("--decoding-tensor-parallel-size", type=int, default=1)

    # KV-cache sizing.
    parser.add_argument("--block-size", type=int, default=16)
    parser.add_argument("--max-num-blocks-per-req", type=int, default=256)
    parser.add_argument("--gpu-memory-utilization", type=float, default=0.9)
    parser.add_argument("--swap-space", type=int, default=16)

    # Context-stage scheduling.
    parser.add_argument("--context-sched-policy", type=str, default="fcfs")
    parser.add_argument("--context-max-batch-size", type=int, default=256)
    parser.add_argument("--context-max-tokens-per-batch", type=int, default=4096)

    # Decoding-stage scheduling.
    parser.add_argument("--decoding-sched-policy", type=str, default="fcfs")
    parser.add_argument("--decoding-max-batch-size", type=int, default=256)
    parser.add_argument("--decoding-max-tokens-per-batch", type=int, default=8192)

    # Simulator / profiler options.
    parser.add_argument("--simulator-mode", action="store_true")
    parser.add_argument("--profiler-data-path", type=str, default=None)
    parser.add_argument("--gpu-mem-size-gb", type=float, default=None)