'''
The profiler is composed of several components encapsulated in layers: profile_scheduler, 
profiler_engine, llm_profiler_engine, offline_profile_engine, and finally the top-level profiler. 
The profile_scheduler focuses on purely executing batched inference for prefill and decode stages, 
in order to collect operator-level timing data. Its scheduling logic is similar to that of the context_scheduler, 
except that it does not require block migration; instead, the prefilled requests are retained within this scheduler. 
The llm_profiler_engine performs inference and collects operator metrics. 
The parallel strategies of the two engines are consistent with those of the context_engine and decode_engine used during actual inference.

Data collection is conducted at the GPU level, 
with each GPU indexed using Ray’s logical ID. For each GPU, the inference time of each operator is recorded under different tensor parallel (TP) and pipeline parallel (PP) configurations. 
This allows the system to retrieve the corresponding profiling data regardless of the parallel strategy or stage the GPU is later used in.

The llm_profile_engine encapsulates two llm_profiler_engine instances — one for the prefill stage and one for the decode stage — and coordinates token generation across both of them. 
The offline_profile_engine further wraps the llm_profiler_engine and provides the generate interface. 
The top-level profiler sets the profiling dataset and invokes the offline_profile_engine.
'''

from abc import ABC, abstractmethod
import copy
from typing import List, Callable, Tuple,Dict, AsyncGenerator,Optional,Union
import torch
import time
from transformers import AutoTokenizer
import os
from tqdm.auto import tqdm
import itertools
from importlib.resources import files
import contextlib
from mixserve.config import ContextStageSchedConfig, ParallelConfig
from mixserve.logger import init_logger
from mixserve.request import Request, BatchedRequests, MigratingRequest
from mixserve.block_manager import BlockManager

from mixserve.config import (
    ModelConfig, 
    ParallelConfig, 
    CacheConfig, 
    ContextStageSchedConfig,
    DecodingStageSchedConfig,
    DisaggParallelConfig
)
from mixserve.logger import init_logger
from mixserve.request import (
    SamplingParams,
    Request,
    create_request,
)
from mixserve.tokenizer import get_tokenizer
from mixserve.utils import Counter

from mixserve.lifetime import LifetimeEvent, LifetimeEventType

import asyncio
import math

import ray
from ray.util.placement_group import PlacementGroup
from mixserve.single_stage_engine import SingleStageLLMEngine,SLEEP_IN_EACH_EVENT_LOOP,SLEEP_WHEN_CONTEXT_NO_REQUEST,PRINT_STATUS_INTERVAL,StepOutput
logger = init_logger(__name__)

# Polling interval (seconds) used while waiting for a request to finish on
# both profiling engines (see LLMProfileEngine.generate).
TIME_TO_CHECK_FINISH = 0.1
class ProfileFCFSScheduler:
    """
    First-come-first-served scheduler used while collecting profiling data.

    It batches prefill requests first; only when the prefill queue is empty
    does it batch decoding requests. Unlike the context-stage scheduler it
    performs no block migration: prefilled requests are retained locally (in
    ``decoding_queue``) so the same scheduler can drive both stages and expose
    operator timings under different parallel scales.
    """

    def __init__(
        self,
        sched_config: ContextStageSchedConfig,  # profiling reuses the context-stage scheduling config
        parallel_config: ParallelConfig,
        block_manager: BlockManager
    ):
        self.sched_config = sched_config
        self.parallel_config = parallel_config
        self.block_manager = block_manager

        # Requests that still need their prefill (context) pass.
        self.prefill_queue: List[Request] = []
        # Requests that have already generated at least one token.
        self.decoding_queue: List[Request] = []

    def add_request(self, request: Request) -> None:
        """Enqueue a request, routed by whether prefill already happened."""
        if len(request.generated_token_ids) == 0:
            self.prefill_queue.append(request)
        else:
            self.decoding_queue.append(request)

    def abort_request(self, request_id: int) -> None:
        """Remove the request with `request_id` from whichever queue holds it (no-op if absent)."""
        for i, req in enumerate(self.prefill_queue):
            if req.request_id == request_id:
                del self.prefill_queue[i]
                return

        for i, req in enumerate(self.decoding_queue):
            if req.request_id == request_id:
                del self.decoding_queue[i]
                return

    def _get_block_needed(self, request: Request) -> int:
        """Number of KV-cache blocks needed to hold all tokens of `request`."""
        block_size = self.block_manager.cache_config.block_size
        total_tokens = len(request.prompt_token_ids) + len(request.generated_token_ids)
        # Ceiling division: a partially-filled block still occupies a whole block.
        return (total_tokens + block_size - 1) // block_size

    def get_next_batch_and_pop(self) -> BatchedRequests:
        """
        Pop the next batch of requests to run.

        Prefill requests are strictly prioritized: decoding requests are only
        batched when the prefill queue is completely empty. A request is added
        only while the batch stays within max_batch_size, max_tokens_per_batch,
        and the GPU block budget.
        """
        next_batch = BatchedRequests()

        def _check_add_request(req: Request) -> bool:
            # Batch-size limit.
            if len(next_batch) >= self.sched_config.max_batch_size:
                return False

            # Token-budget limit.
            current_tokens = next_batch.get_num_input_tokens()
            new_tokens = req.get_num_input_tokens()
            if current_tokens + new_tokens > self.sched_config.max_tokens_per_batch:
                return False

            # GPU block-budget limit.
            current_blocks = sum(self._get_block_needed(r) for r in next_batch.requests)
            new_blocks = self._get_block_needed(req)
            if (current_blocks + new_blocks
                > self.block_manager.max_num_gpu_blocks):
                return False

            return True

        if self.prefill_queue:
            picked_indices = []
            for i, req in enumerate(self.prefill_queue):
                if _check_add_request(req):
                    req.prefill_token_this_iteration = len(req.get_input_tokens_ids())
                    next_batch.add_request(req)
                    picked_indices.append(i)
                    if len(next_batch) >= self.sched_config.max_batch_size:
                        break
            # Delete from the back so earlier indices stay valid.
            for i in reversed(picked_indices):
                del self.prefill_queue[i]
        else:
            picked_indices = []
            for i, req in enumerate(self.decoding_queue):
                if _check_add_request(req):
                    next_batch.add_request(req)
                    picked_indices.append(i)
                    if len(next_batch) >= self.sched_config.max_batch_size:
                        break
            for i in reversed(picked_indices):
                del self.decoding_queue[i]

        return next_batch

    def on_finish_requests(self, batch: BatchedRequests) -> None:
        """Requeue the unfinished requests of `batch` for further decoding steps."""
        for req in batch.requests:
            if not req.is_finished:
                self.decoding_queue.append(req)

    def print_status(self) -> None:
        logger.info(
            f"(profile) Prefill queue: {len(self.prefill_queue)}, "
            f"Decode queue: {len(self.decoding_queue)}"
        )

    def __repr__(self) -> str:
        # Fixed: previously reported the stale name "StrictOrderProfileScheduler".
        return (
            f"{type(self).__name__}(max_batch_size={self.sched_config.max_batch_size}, "
            f"max_tokens_per_batch={self.sched_config.max_tokens_per_batch})"
        )

def get_profile_stage_scheduler(
    sched_config: ContextStageSchedConfig,
    parallel_config: ParallelConfig,
    block_manager: BlockManager
) -> ProfileFCFSScheduler:
    """
    Factory for the profiling-stage scheduler.

    Raises:
        NotImplementedError: if `sched_config.policy` names an unsupported
            scheduling policy (only "fcfs" is implemented). Previously the
            function silently returned None in that case, deferring the
            failure to an opaque AttributeError at the call site.
    """
    if sched_config.policy == "fcfs":
        return ProfileFCFSScheduler(sched_config, parallel_config, block_manager)
    raise NotImplementedError(
        f"Unsupported profile scheduling policy: {sched_config.policy!r}"
    )

class ProfileEngine(SingleStageLLMEngine):
    """
    Single-stage engine used for offline profiling.

    Unlike the real context/decode engines, it runs both prefill and decode
    batches locally through a ProfileFCFSScheduler (no KV-block migration),
    so operator timings can be collected under this engine's tensor/pipeline
    parallel configuration.
    """
    def _get_scheduler(self) -> ProfileFCFSScheduler:
        # Hook used by the base class to build this engine's scheduler.
        return get_profile_stage_scheduler(
            self.sched_config,
            self.parallel_config,
            self.block_manager
        )
    
    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        cache_config: CacheConfig,
        sched_config: ContextStageSchedConfig,
        placement_groups: List[PlacementGroup],
        engine_on_new_step_output_callback: Callable[[int, StepOutput], None],
        engine_on_new_lifetime_event_callback: Callable[[int, LifetimeEvent, bool], None],
        profilng:bool=True,  # NOTE(review): typo of "profiling"; kept as-is for caller compatibility
        label:str=''  # human-readable tag for this engine, e.g. "prefill" / "decode"
    ):
        super().__init__(
            None,
            model_config,
            parallel_config,
            cache_config,
            sched_config,
            placement_groups,
            engine_on_new_step_output_callback,
            engine_on_new_lifetime_event_callback,
            profiling=profilng
        )
        self.label=label
        # Set by stop_event_loop() to make start_event_loop() exit cooperatively.
        self._stop_event = asyncio.Event()
        # All the batchedrequests that are pushed into the pipeline
        # Note: len(batched_in_pipeline) <= pp_size and batches are appended in FIFO
        self.batches_in_pipeline: List[BatchedRequests] = []
        # One future per in-flight batch (returned by the last pipeline stage's
        # leader), or None as a placeholder when the batch was empty.
        self.batches_ret_futures = []
    
    
    def add_request(self, request: Request):
        """Enqueue `request` on the profiling scheduler."""
        self.scheduler.add_request(request)
    
    def _free_request_resources(self, request_id: int):
        # No profiling-specific resources to release; defer to the base class.
        super()._free_request_resources(request_id)

    async def _step(self):
        """
        Run one step of inference on the batch of requests chosen by the scheduler.
        
        Note: if pipeline parallelism is used, one step only kicks one stage of execution,
        and each request needs #pp steps in total to generate one token.
        
        Note2. Pipeline parallel is not tested yet
        """
        # pick next batch from scheduler
        batched_requests = self.scheduler.get_next_batch_and_pop()
        if len(batched_requests) == 0:
            # Two cases may cause len(batched_requests) == 0:
            # 1. No request in the waiting queue
            # 2. No enough free blocks (e.g. the decoding stage is too slow)
            # An empty placeholder keeps the pipeline bookkeeping below aligned.
            self.batches_in_pipeline.append(batched_requests)
            self.batches_ret_futures.append(None)
            await asyncio.sleep(SLEEP_WHEN_CONTEXT_NO_REQUEST)
        else:
            # allocate blocks as needed
            self.block_manager.allocate_blocks_batched(batched_requests)
            # Log down the lifetime event
            for request in batched_requests.requests:
                if request.is_context_stage():
                    self.engine_on_new_lifetime_event_callback(
                        request.request_id,
                        LifetimeEvent(LifetimeEventType.ContextBegin)
                    )
                else:
                    self.engine_on_new_lifetime_event_callback(
                        request.request_id,
                        LifetimeEvent(LifetimeEventType.DecodingBegin)
                    )
            # push the batch into pipeline
            batched_requests.start_one_iteration(time.time())
            self.batches_in_pipeline.append(batched_requests)
            # NOTE(review): the five locals below are unused — the remote call
            # right after recomputes every value; kept byte-for-byte to avoid
            # any behavior change.
            request_ids = batched_requests.get_request_ids()
            input_tokens_batched = batched_requests.get_input_tokens_batched()
            first_token_indexes = batched_requests.get_first_token_indexes()
            partial_block_table = self.block_manager.get_partial_block_table(request_ids)
            original_input_tokens = batched_requests.get_original_input_tokens()

            remote_calls = self._remote_call_all_workers_async(
                "step",
                batched_requests.get_request_ids(),
                batched_requests.get_input_tokens_batched(),
                batched_requests.get_first_token_indexes(),
                self.block_manager.get_partial_block_table(
                    batched_requests.get_request_ids()
                ),
                batched_requests.get_original_input_tokens()
            )
        
            pp_size = self.parallel_config.pipeline_parallel_size
            tp_size = self.parallel_config.tensor_parallel_size
            # only the leader of the last stage return valid output, i.e., generated tokens ids
            self.batches_ret_futures.append(remote_calls[(pp_size - 1) * tp_size])

        if len(self.batches_in_pipeline) == self.parallel_config.pipeline_parallel_size:
            # if the pipeline is full, block until the earliest batch returns
            # if pipeline parallelism is not used, i.e., pp = 1, this should always be true
            if self.batches_ret_futures[0] is None:
                # No request in the batch
                self.batches_in_pipeline.pop(0)
                self.batches_ret_futures.pop(0)
            else:
                generated_tokens_ids = await self.batches_ret_futures[0]
                    
                end_time = time.time()
                generated_tokens = []
                for gen_token_id in generated_tokens_ids:
                    try:
                        token = self.tokenizer.decode(gen_token_id)
                    except Exception as e:
                        # Best-effort decode: keep going with an empty token text.
                        print(f" Warning: Cannot decode token with id {gen_token_id}. Error: {e}")
                        token = ""
                    generated_tokens.append(token)

                finished_batch = self.batches_in_pipeline[0]
                finished_batch.finish_one_iteration(
                    generated_tokens, generated_tokens_ids, end_time
                )
                
                # Unfinished requests go back to the scheduler's decode queue.
                self.scheduler.on_finish_requests(finished_batch)
                
                for request, new_token, new_token_id in zip(
                    finished_batch.requests, generated_tokens, generated_tokens_ids
                ):
                    step_output = StepOutput(request, new_token, new_token_id)

                    # NOTE(review): ContextEnd is emitted even for requests that
                    # just ran a decoding iteration (cf. DecodingBegin above) —
                    # confirm this asymmetry is intended for profiling.
                    self.engine_on_new_lifetime_event_callback(
                            request.request_id,
                            LifetimeEvent(LifetimeEventType.ContextEnd)
                        )
                    self.engine_on_new_step_output_callback(
                        request.request_id,
                        step_output
                    )

                # Cannot free blocks now! The decoding stage may still need them!

                self.batches_in_pipeline.pop(0)
                self.batches_ret_futures.pop(0)
                
                # Inform the user that the request has finished the context stage
                for request in finished_batch.requests:
                    if  request.is_finished:
                        # Push the request into the bridge queue if it is not finished
                        self._free_request_resources(request.request_id)

    async def start_event_loop(self):
        """Run _step() repeatedly until stop_event_loop() is called."""
        async def event_loop1():
            while not self._stop_event.is_set():
                await self._step()
                await asyncio.sleep(SLEEP_IN_EACH_EVENT_LOOP)
        
        await event_loop1()
        
    def stop_event_loop(self):
        # Signal start_event_loop() to exit after the current iteration.
        self._stop_event.set()
    def print_engine_status(self):
        self.scheduler.print_status()
class LLMProfileEngine:
    """
    Profiling engine that pairs two ProfileEngine instances — one built with
    the context (prefill) parallel config and one with the decoding parallel
    config — and runs every request through both, so operator timings are
    collected for each parallel strategy.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        cache_config: CacheConfig,
        sched_config: ContextStageSchedConfig,
        decode_sched_config: DecodingStageSchedConfig,
        disagg_parallel_config: DisaggParallelConfig
    ):
        self.model_config = model_config
        self.dis_parallel_config = disagg_parallel_config
        self.cache_config = cache_config
        self.sched_config = sched_config
        self.request_counter = Counter()
        self.tokenizer = get_tokenizer(
            model_config.tokenizer,
            tokenizer_mode=model_config.tokenizer_mode,
            trust_remote_code=model_config.trust_remote_code,
        )

        logger.info("Initializing placement group")
        self.placement_groups = self._init_placement_groups()

        # Fixed log-message typo ("proflie" -> "profile").
        logger.info("Initializing profile LLM engine")
        self.profile_engine_prefill = ProfileEngine(
            self.model_config,
            self.dis_parallel_config.context,
            self.cache_config,
            self.sched_config,
            self.placement_groups,
            self._on_new_step_output_callback_prefill,
            self._on_new_lifetime_event_callback,
            True,
            "prefill"
        )
        # The decode engine reuses the context-stage scheduler, so its
        # scheduling parameters are repackaged as a ContextStageSchedConfig.
        self.profile_engine_decode = ProfileEngine(
            self.model_config,
            self.dis_parallel_config.decoding,
            self.cache_config,
            ContextStageSchedConfig(
                policy='fcfs',
                max_batch_size=decode_sched_config.max_batch_size,
                max_tokens_per_batch=decode_sched_config.max_tokens_per_batch,
                parallel_config=self.dis_parallel_config.decoding
            ),
            self.placement_groups,
            self._on_new_step_output_callback_decode,
            self._on_new_lifetime_event_callback,
            True,
            "decode"
        )
        # request_id -> queue of StepOutput, one map per engine.
        # Created in self.generate(); deleted there once the request finishes.
        self.request_outputs_prefill: Dict[int, asyncio.Queue[StepOutput]] = {}
        self.request_outputs_decode: Dict[int, asyncio.Queue[StepOutput]] = {}
        # request_id -> list of LifetimeEvent
        # Created when calling self.generate()
        # Cleared by the caller of self.generate() (i.e. the engine does not clear that)
        # TODO: clear this automatically to avoid memory leak
        self.request_lifetime_events: Dict[int, List[LifetimeEvent]] = {}
        # request_id -> whether that engine has emitted the request's final token.
        self.prefill_ready: Dict[int, bool] = {}
        self.decode_ready: Dict[int, bool] = {}
        self.engine_initialized = False

    def release_pg(self):
        """Remove every Ray placement group this engine created."""
        print("trying to release placement group")
        for pg in self.placement_groups:
            ray.util.remove_placement_group(pg)
            print("release placement group")

    def _on_new_step_output_callback_prefill(self, request_id: int, step_output: StepOutput):
        """
        Called by the prefill engine when a new output token is generated.
        Marks the request as prefill-finished once the final token arrives.
        """
        if step_output.is_finished:
            self.prefill_ready[request_id] = True
        self.request_outputs_prefill[request_id].put_nowait(step_output)

    def _on_new_step_output_callback_decode(self, request_id: int, step_output: StepOutput):
        """
        Called by the decode engine when a new output token is generated.
        Marks the request as decode-finished once the final token arrives.
        """
        if step_output.is_finished:
            self.decode_ready[request_id] = True
        self.request_outputs_decode[request_id].put_nowait(step_output)

    def _on_new_lifetime_event_callback(self, request_id: int, event: LifetimeEvent, dont_add_if_dup: bool = False):
        """
        Record a lifetime event for `request_id`.

        If `dont_add_if_dup` is True and the most recently recorded event has
        the same event_type, the new event is dropped.
        """
        if dont_add_if_dup and \
            len(self.request_lifetime_events[request_id]) > 0 and \
                self.request_lifetime_events[request_id][-1].event_type == event.event_type:
            return
        self.request_lifetime_events[request_id].append(event)

    def _init_placement_groups(self) -> Optional[List[PlacementGroup]]:
        """
        Create placement groups for all engines and all workers.

        Currently we force the same layer of the context & decoding stage to be
        executed on the same node (we call this "aligned"). This simplifies k/v
        cache migration.
        """
        context_pp = self.dis_parallel_config.context.pipeline_parallel_size
        context_tp = self.dis_parallel_config.context.tensor_parallel_size
        decoding_pp = self.dis_parallel_config.decoding.pipeline_parallel_size
        decoding_tp = self.dis_parallel_config.decoding.tensor_parallel_size

        # Each placement group is responsible for `layer_per_placement_group` layers
        layer_per_context_pp = self.model_config.get_num_layers(self.dis_parallel_config.context)
        layer_per_decoding_pp = self.model_config.get_num_layers(self.dis_parallel_config.decoding)
        layer_per_placement_group = math.lcm(layer_per_context_pp, layer_per_decoding_pp)

        # Each placement group contains `workers_per_placement_group` workers
        workers_per_placement_group = \
            layer_per_placement_group // layer_per_context_pp * context_tp \
            + layer_per_placement_group // layer_per_decoding_pp * decoding_tp

        # There should be `num_placement_groups` placement groups in total
        num_placement_groups = self.model_config.get_num_layers() // layer_per_placement_group
        assert num_placement_groups * workers_per_placement_group == \
            context_pp * context_tp + decoding_pp * decoding_tp

        # Create placement groups; STRICT_PACK keeps all of a group's workers
        # on the same node (the "aligned" property above).
        placement_groups = []
        for _ in range(num_placement_groups):
            placement_group = ray.util.placement_group(
                [{"GPU": 1}] * workers_per_placement_group,
                strategy="STRICT_PACK",
            )
            ray.get(placement_group.ready(), timeout=1000)
            placement_groups.append(placement_group)

        return placement_groups

    async def initialize(self):
        """Initialize both underlying engines concurrently."""
        await asyncio.gather(
            self.profile_engine_prefill.initialize(),
            self.profile_engine_decode.initialize()
        )
        self.engine_initialized = True

    def _remote_call_all_workers(
        self,
        func_name: str,
        *args
    ):
        """
        Call `func_name` on all workers, block until all workers finish, and
        return all the results.
        """
        handlers = self._remote_call_all_workers_async(func_name, *args)
        return ray.get(handlers)

    def _remote_call_all_workers_async(
        self,
        func_name: str,
        *args
    ):
        """
        Call `func_name` asynchronously on all workers of both engines; return
        the futures immediately.
        """
        handlers = self.profile_engine_prefill._remote_call_all_workers_async(func_name, *args)
        handlers += self.profile_engine_decode._remote_call_all_workers_async(func_name, *args)
        return handlers

    async def _start_my_event_loop(self):
        # This engine has no event loop of its own; the two ProfileEngines do
        # all the stepping.
        pass

    async def start_all_event_loops(self):
        """Start the prefill engine's, decode engine's, and this engine's event loops."""
        logger.info("Starting LLMEngine's event loops")
        assert self.engine_initialized, "Engine not initialized. Please call engine.initialize() before starting event loops."
        await asyncio.gather(
            self.profile_engine_prefill.start_event_loop(),
            self.profile_engine_decode.start_event_loop(),
            self._start_my_event_loop()
        )

    async def generate(
        self,
        prompt: Optional[str],
        prompt_token_ids: Optional[List[int]],
        sampling_params: SamplingParams,
        arrival_time: Optional[float] = None,
        request_id: Optional[int] = None
    ) -> None:
        """
        Submit one request to BOTH engines (the decode engine receives a deep
        copy) and wait until each engine reports the request finished.

        Returns None; per-token outputs are delivered through the step-output
        callbacks and lifetime-event bookkeeping, not through this coroutine.
        """
        assert self.engine_initialized, "Engine not initialized. Please call engine.initialize() before generating."
        req = create_request(
            prompt,
            prompt_token_ids,
            sampling_params,
            self.request_counter,
            self.tokenizer,
            arrival_time,
            request_id,
        )
        # Deep-copy so the two engines mutate independent Request objects.
        req_copy = copy.deepcopy(req)
        self.prefill_ready[req.request_id] = False
        self.decode_ready[req.request_id] = False
        self.request_outputs_prefill[req.request_id] = asyncio.Queue()
        self.request_outputs_decode[req.request_id] = asyncio.Queue()
        self.request_lifetime_events[req.request_id] = []
        self._on_new_lifetime_event_callback(req.request_id, LifetimeEvent(LifetimeEventType.Issued))
        self.profile_engine_prefill.add_request(req)
        self.profile_engine_decode.add_request(req_copy)

        # Poll until both engines have finished this request.
        while not (self.prefill_ready[req.request_id] and self.decode_ready[req.request_id]):
            try:
                await asyncio.sleep(TIME_TO_CHECK_FINISH)
            except asyncio.CancelledError:
                # The engine returns
                # Exception should be handled by the engine, not me
                return
            except GeneratorExit:
                return

        del self.prefill_ready[req.request_id]
        del self.decode_ready[req.request_id]
        del self.request_outputs_prefill[req.request_id]
        del self.request_outputs_decode[req.request_id]

    def abort_request(self, request_id: int):
        """
        Abort `request_id` on both engines' schedulers.

        Bug fix: this previously referenced the non-existent attribute
        ``self.profile_engine`` (the engines are named profile_engine_prefill /
        profile_engine_decode) and therefore always raised AttributeError.
        """
        self.profile_engine_prefill.scheduler.abort_request(request_id)
        self.profile_engine_decode.scheduler.abort_request(request_id)
        
class OfflineProfileLLM:
    """
    Offline wrapper around LLMProfileEngine that exposes a batch `generate`
    interface for profiling runs and owns the background event-loop task.
    """
    def __init__(
        self,
        model_config: ModelConfig,
        disagg_parallel_config:DisaggParallelConfig,
        cache_config: CacheConfig,
        context_sched_config: ContextStageSchedConfig,
        decode_sched_config:DecodingStageSchedConfig
    ):
        # request_id -> queue of StepOutput, filled via _on_new_step_output_callback.
        self.request_outputs: Dict[int, asyncio.Queue[StepOutput]] = {}
        # Background task running the engines' event loops; created lazily in generate().
        self._event_loop_task: Optional[asyncio.Task] = None
        # request_id -> list of LifetimeEvent
        # Created when calling self.generate()
        # Cleared by the caller of self.generate() (i.e. the engine does not clear that)
        # TODO: clear this automatically to avoid memory leak
        self.request_lifetime_events: Dict[int, List[LifetimeEvent]] = {}
        
        self.engine = LLMProfileEngine(
            model_config=model_config,
            disagg_parallel_config=disagg_parallel_config,
            cache_config=cache_config,
            decode_sched_config=decode_sched_config,
            sched_config=context_sched_config,
            
        )
        # Directory where profiling results (the bench database) live.
        # NOTE(review): files('mixserve') returns an importlib.resources
        # Traversable; os.path.join assumes it behaves like a real filesystem
        # path — confirm this holds for zipped/namespace installs.
        package_root = files('mixserve')  
        db_path = os.path.join(package_root, 'benchdb')

        # check benchdb path
        if not os.path.exists(db_path):
            os.makedirs(db_path, exist_ok=True)
        
    async def initialize(self):
        """Initialize the wrapped LLMProfileEngine (both underlying engines)."""
        await self.engine.initialize()
        
    async def release_pg(self):
        """Stop the event loops, cancel the background task, and release the Ray placement groups."""
        if self._event_loop_task and not self._event_loop_task.done():
            self.engine.profile_engine_prefill.stop_event_loop()
            self.engine.profile_engine_decode.stop_event_loop()
            self._event_loop_task.cancel()
            # Awaiting the cancelled task lets it unwind cleanly.
            with contextlib.suppress(asyncio.CancelledError):
                await self._event_loop_task 
        self.engine.release_pg()
        del self.engine
        
    def _on_new_step_output_callback(self, request_id: int, step_output: StepOutput):
        """
        Called by self.context_engine or self.decoding_engine when a new output token
        is generated

        NOTE(review): in the visible code this callback is never passed to
        LLMProfileEngine (which installs its own callbacks) — confirm whether
        it is wired up elsewhere or dead code.
        """
        if self.request_outputs.get(request_id):
            self.request_outputs[request_id].put_nowait(step_output)
        
    def _on_new_lifetime_event_callback(self, request_id: int, event: LifetimeEvent, dont_add_if_dup: bool = False):
        """
        Record a lifetime event for `request_id`.

        If `dont_add_if_dup` is True and the most recently recorded event has
        the same event_type, the new event is dropped.
        """
        # if dont_add_if_dup == True and self.request_lifetime_events[request_id][-1].event_type == event.event_type, don't add it
        if dont_add_if_dup and \
            len(self.request_lifetime_events[request_id]) > 0 and \
                self.request_lifetime_events[request_id][-1].event_type == event.event_type:
            return
        self.request_lifetime_events[request_id].append(event)
    
    async def generate(
        self,
        prompts: Optional[Union[List[str], str]] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        sampling_params: Optional[Union[SamplingParams, List[SamplingParams]]] = None,
        use_tqdm: bool = True
    ) -> List[List[StepOutput]]:
        """
        Run every prompt through the profiling engine and wait for completion.

        Starts the engines' event loops lazily (once), then submits one
        coroutine per request and gathers them.

        Raises:
            ValueError: if neither prompts nor prompt_token_ids is given, or
                if both are given with mismatched lengths.
        """
        if prompts is None and prompt_token_ids is None:
            raise ValueError("prompts or prompt_token_ids must be provided")
        if isinstance(prompts, str):
            # Convert a single prompt to a list.
            prompts = [prompts]
        if prompts is not None and prompt_token_ids is not None:
            if len(prompts) != len(prompt_token_ids):
                raise ValueError(
                    "The lengths of prompts and prompt_token_ids must be the same."
                )

        num_requests = len(prompts) if prompts is not None else len(prompt_token_ids)
        # Broadcast a single (or missing) SamplingParams to all requests.
        if sampling_params is None:
            sampling_params = [SamplingParams()] * num_requests
        elif isinstance(sampling_params, SamplingParams):
            sampling_params = [sampling_params] * num_requests
        else:
            assert (
                len(sampling_params) == num_requests
            ), f"prompts should pair with the list of sampling parameters, \
                 but got {num_requests} prompts and {len(sampling_params)} sampling parameters"

        async def deal_with_request_coroutine(req_index: int) :
            # Submit one request to the engine and wait until it finishes.
            prompt = prompts[req_index] if prompts is not None else None
            token_ids = None if prompt_token_ids is None else prompt_token_ids[req_index]
            await self.engine.generate(prompt, token_ids, sampling_params[req_index])
                

        
        async def generate_main():
            # Lazily (re)start the background event-loop task if needed.
            if self._event_loop_task is None or self._event_loop_task.done():
                self._event_loop_task = asyncio.create_task(self.engine.start_all_event_loops())
            request_tasks = [
                asyncio.create_task(deal_with_request_coroutine(i))
                for i in range(num_requests)
            ]
            return await asyncio.gather(*request_tasks)

        return await generate_main()

class Profiler:
    '''
    Top-level offline profiler: builds an OfflineProfileLLM and sweeps a grid
    of (batch size, input length) configurations to collect operator timings.
    '''
    # Set to True once profile() completes the whole sweep.
    finish_profile:bool
    def __init__(self,cache_config:CacheConfig,model_config:ModelConfig,
    profile_config:ContextStageSchedConfig,
    decode_profile_config:DecodingStageSchedConfig,
    dis_para_config:DisaggParallelConfig
    ):
        self.cache_config=cache_config
        self.model_config=model_config
        self.finish_profile=False
        self.profile_config=profile_config
        # The decode engine reuses context-stage scheduling, so repackage the
        # decoding parameters as a ContextStageSchedConfig.
        # NOTE(review): LLMProfileEngine wraps its decode_sched_config into a
        # ContextStageSchedConfig again; the double wrapping works because only
        # max_batch_size / max_tokens_per_batch are read — confirm intended.
        self.decode_profile_config=ContextStageSchedConfig(policy='fcfs',max_batch_size=decode_profile_config.max_batch_size\
            ,max_tokens_per_batch=decode_profile_config.max_tokens_per_batch,parallel_config=dis_para_config.decoding)
        self.dis_para_config=dis_para_config
    
    @staticmethod
    def generate_prompt(batchsize:int,input_length:int,model_dir:str):
        """
        Build `batchsize` synthetic prompts of exactly `input_length` tokens each.

        Tokenizes the numbers 1..999 with the model's tokenizer, doubles the
        resulting id list until it is long enough, then reshapes it into a
        (batchsize, input_length) list of token-id lists.
        """
        num_tokens=batchsize*input_length
        
        text = " ".join([str(i) for i in range(1, 1000)])
        tokenizer = AutoTokenizer.from_pretrained(model_dir)
        stem_token_ids = tokenizer(text).input_ids
        input_ids_list = copy.deepcopy(stem_token_ids)
        # Doubling each round reaches num_tokens in O(log n) iterations.
        while len(input_ids_list) < num_tokens:
            input_ids_list += input_ids_list
        input_ids = torch.tensor(input_ids_list[:num_tokens], dtype=torch.int32, device="cpu")
        prompt_token_ids = input_ids.view(batchsize, input_length).tolist()
        return prompt_token_ids
    
    async def profile(self):
        """
        Run the full profiling sweep.

        For every (batch size, input length) pair, generate synthetic prompts
        and run them through an OfflineProfileLLM. Placement groups are always
        released afterwards, even if the sweep fails partway.
        """
        sampling_params = SamplingParams(
            temperature=0.8, top_p=0.95, max_tokens=50, stop=["\n"],ignore_eos=True
        )

        # Create an LLM for offline inference.
        # TODO:CHANGE OfflineLLM to ProfileLLM
        llm = OfflineProfileLLM(
            model_config=self.model_config,
            disagg_parallel_config=self.dis_para_config,
            cache_config=self.cache_config,
            context_sched_config=self.profile_config,
            decode_sched_config=self.decode_profile_config
        )
        await llm.initialize()
        batch_sizes = [1,2,4,8,16,32,48]
        input_lens  = [64, 128, 256, 384, 512,1024]

        total_iters = len(batch_sizes) * len(input_lens)      # 7 x 6 = 42 configurations
        try:
            with tqdm(total=total_iters, desc="Profiling") as pbar:
                for bs, in_len in itertools.product(batch_sizes, input_lens):
                    prompts_token_ids = Profiler.generate_prompt(
                        bs, in_len, self.model_config.model
                    )
                    await llm.generate(
                        prompt_token_ids=prompts_token_ids,
                        sampling_params=sampling_params,
                    )
                    pbar.update(1)      # advance the progress bar
                    print(f"bs {bs},input_length {in_len} finished")
            self.finish_profile = True
        finally:
            # Always release placement groups; release_pg is async, so await it.
            await llm.release_pg()      
        return