import logging
import os
import time
from contextlib import contextmanager
from typing import Any, Dict, List, Optional, Tuple

import torch
from huggingface_hub import snapshot_download

from sglang.srt.distributed import GroupCoordinator, patch_tensor_parallel_group
from sglang.srt.layers.dp_attention import disable_dp_size
from sglang.srt.layers.logits_processor import LogitsProcessorOutput
from sglang.srt.layers.sampler import get_token_ids_logprobs, get_top_logprobs
from sglang.srt.managers.schedule_batch import (
    Req,
    ScheduleBatch,
    get_last_loc,
    global_server_args_dict,
)
from sglang.srt.managers.tp_worker import TpModelWorker
from sglang.srt.model_executor.forward_batch_info import (
    CaptureHiddenMode,
    ForwardBatch,
    ForwardMode,
)
from sglang.srt.server_args import ServerArgs
from sglang.srt.speculative.eagle_draft_cuda_graph_runner import (
    EAGLEDraftCudaGraphRunner,
)
from sglang.srt.speculative.eagle_utils import (
    SpecReqMigrationInfo,
    SpecReqMigrationInfoPhase1,
    NewSpecReqMigrationInfo,
    NewSpecReqMigrationInfoPhase1,
    BatchSpecReqMigrationInfo,
    MigrationPhase,
    EagleDraftInput,
    EagleVerifyInput,
    EagleVerifyOutput,
    assign_draft_cache_locs,
    generate_token_bitmask,
    select_top_k_tokens,
)
from sglang.srt.speculative.spec_info import SpeculativeAlgorithm
from sglang.srt.utils import empty_context, fast_topk, get_available_gpu_memory, is_cuda
from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
from sglang.srt.mem_cache.radix_cache import RadixCache
import torch.distributed as dist
from sglang.srt.speculative.build_eagle_tree import build_predictor,first_rank_print
import numpy as np

if is_cuda():
    from sgl_kernel import segment_packbits

logger = logging.getLogger(__name__)


@contextmanager
def draft_tp_context(tp_group: GroupCoordinator):
    """Run the enclosed code under the draft model's own tensor-parallel group.

    The draft model does not use data parallelism, so dp size is disabled
    while the given tp group is patched in. mscclpp is disabled for now
    because it cannot handle two communication groups.
    """
    with disable_dp_size():
        with patch_tensor_parallel_group(tp_group):
            yield


class EAGLEWorker(TpModelWorker):
    """Tensor-parallel worker implementing EAGLE/EAGLE3 speculative decoding.

    Owns the draft model while sharing the req-to-token pool and the KV-cache
    allocator with ``target_worker``; the draft and target models keep their
    own KV-cache pools.
    """

    def __init__(
        self,
        server_args: ServerArgs,
        gpu_id: int,
        tp_rank: int,
        dp_rank: Optional[int],
        nccl_port: int,
        target_worker: TpModelWorker,
    ):
        # Parse arguments
        self.server_args = server_args
        self.topk = server_args.speculative_eagle_topk
        self.speculative_num_steps = server_args.speculative_num_steps
        # +1: one token comes from the draft prefill, the rest from draft steps.
        self.padded_static_len = self.speculative_num_steps + 1
        self.enable_nan_detection = server_args.enable_nan_detection
        self.gpu_id = gpu_id
        self.device = server_args.device
        self.target_worker = target_worker
        self.page_size = server_args.page_size
        self.speculative_algorithm = SpeculativeAlgorithm.from_string(
            server_args.speculative_algorithm
        )

        self.last_step_time = 0
        # Override context length with target model's context length
        server_args.context_length = target_worker.model_runner.model_config.context_len

        # Do not capture cuda graph in `super().__init__()`
        # It will be captured later.
        backup_disable_cuda_graph = server_args.disable_cuda_graph
        server_args.disable_cuda_graph = True
        # Share the allocator with a target worker.
        # Draft and target worker own their own KV cache pools.
        self.req_to_token_pool, self.token_to_kv_pool_allocator = (
            target_worker.get_memory_pool()
        )

        # Load hot token ids
        if self.speculative_algorithm.is_eagle3():
            if server_args.speculative_token_map is not None:
                logger.warning(
                    "Speculative token map specified, but EAGLE3 models already have this. Ignoring the specified token map."
                )
            self.hot_token_id = None
        elif server_args.speculative_token_map is not None:
            self.hot_token_id = load_token_map(server_args.speculative_token_map)
            server_args.json_model_override_args = (
                f'{{"hot_vocab_size": {len(self.hot_token_id)}}}'
            )
        else:
            self.hot_token_id = None

        # Init draft worker
        with empty_context():
            super().__init__(
                server_args=server_args,
                gpu_id=gpu_id,
                tp_rank=tp_rank,
                pp_rank=0,  # FIXME
                dp_rank=dp_rank,
                nccl_port=nccl_port,
                is_draft_worker=True,
                req_to_token_pool=self.req_to_token_pool,
                token_to_kv_pool_allocator=self.token_to_kv_pool_allocator,
            )

        embed, head = self.target_worker.model_runner.model.get_embed_and_head()

        if self.speculative_algorithm.is_eagle3():
            # EAGLE3 models don't share lm_head
            self.draft_model_runner.model.set_embed(embed)

            # grab hot token ids
            self.hot_token_id = self.draft_model_runner.model.get_hot_token_id().to(
                embed.device
            )
        else:
            if self.hot_token_id is not None:
                # Restrict the shared lm_head rows to the hot vocabulary.
                head = head.clone()
                self.hot_token_id = self.hot_token_id.to(head.device)
                head.data = head.data[self.hot_token_id]

            # Share the embedding and lm_head
            self.draft_model_runner.model.set_embed_and_head(embed, head)

        # Init attention backend and cuda graphs
        self.draft_model_runner.server_args.disable_cuda_graph = (
            backup_disable_cuda_graph
        )
        self.draft_tp_context = (
            draft_tp_context if server_args.enable_dp_attention else empty_context
        )
        with self.draft_tp_context(self.draft_model_runner.tp_group):
            self.init_attention_backend()
            self.init_cuda_graphs()

        self.tmp_migration_requests = None

        # Get configs for both models
        self.num_draft_layers = self.model_runner.model_config.num_hidden_layers
        self.num_target_layers = self.target_worker.model_runner.model_config.num_hidden_layers

        # Calculate head dimensions
        self.draft_numel_per_layer = self.model_runner.token_to_kv_pool.get_key_buffer(0)[0].numel()
        self.target_numel_per_layer = self.target_worker.model_runner.token_to_kv_pool.get_key_buffer(0)[0].numel()

        # Calculate numel for draft model's KV cache (K and V combined)
        self.draft_kv_numel_per_token = 2 * self.num_draft_layers * self.draft_numel_per_layer

        # Calculate numel for target model's KV cache (K and V combined)
        self.target_kv_numel_per_token = 2 * self.num_target_layers * self.target_numel_per_layer

        # Sum them up for the final total
        self.kv_numel_per_token = self.draft_kv_numel_per_token + self.target_kv_numel_per_token

        # Decode steps at which migrations fire during the migration test.
        # NOTE(review): presumably "01" / "10" are the two migration
        # directions; the consumer is outside this view — confirm.
        self.migrate_01_steps: List[int] = [10, 20, 30]
        self.migrate_10_steps: List[int] = []

        self.cur_decoding_step = 0
        self.enable_migration_test =False
        self.log_accept_length_file = f"/sglang/outputs/accept_length_{dp_rank}.txt"
        if self.server_args.enable_auto_spec:
            # Upper bound on draft tokens per request over all spec steps.
            self.max_draft_tokens = (1 + (self.speculative_num_steps - 1) * self.topk) * self.topk
            #self.verify_timings

        # KV head counts / head dims for draft and target models.
        # NOTE(review): presumably used by the KV migration paths — confirm.
        self.draft_model_n_heads = self.model_runner.model_config.num_key_value_heads
        self.draft_model_headdim =  self.model_runner.model_config.head_dim
        self.target_model_n_heads = self.target_worker.model_runner.model_config.num_key_value_heads
        self.target_model_headdim =  self.target_worker.model_runner.model_config.head_dim
        # Scratch state for migration phase 3 (see handle_migrate_phase3,
        # defined outside this view).
        self.phase3_reqs = None

        self.recv_phase3 = False
        self.send_phase3 = False
        self.res1 = None
        self.res2 = None
        self.req = None
        self.concat_tensor_phase3 = None
        self.new_req_pool_indices = None
        self.new_physical_kv_slots_flat = None
        self.is_profiling = server_args.if_spec_profiling
        self.dp_rank = dp_rank
        # Build the spec predictor and warm its cache.
        self.build_prediction_cache()
        self.test_thoughputs = True
        self.test_batch = 0

    def init_attention_backend(self):
        """Create the multi-step draft attention backend for the configured
        attention backend.

        All supported backends share the same post-conditions:
        `draft_extend_attn_backend` is None, `padded_static_len` is
        `speculative_num_steps + 1`, and `has_prefill_wrapper_verify` marks
        whether the backend provides a prefill wrapper for verification
        (flashinfer only).

        Raises:
            ValueError: If the configured attention backend is unsupported.
        """
        backend_name = self.server_args.attention_backend
        if backend_name == "flashinfer":
            if not global_server_args_dict["use_mla_backend"]:
                from sglang.srt.layers.attention.flashinfer_backend import (
                    FlashInferMultiStepDraftBackend,
                )

                backend_cls = FlashInferMultiStepDraftBackend
            else:
                from sglang.srt.layers.attention.flashinfer_mla_backend import (
                    FlashInferMLAMultiStepDraftBackend,
                )

                backend_cls = FlashInferMLAMultiStepDraftBackend
            self.has_prefill_wrapper_verify = True
        elif backend_name == "triton":
            from sglang.srt.layers.attention.triton_backend import (
                TritonMultiStepDraftBackend,
            )

            backend_cls = TritonMultiStepDraftBackend
            self.has_prefill_wrapper_verify = False
        elif backend_name == "fa3":
            from sglang.srt.layers.attention.flashattention_backend import (
                FlashAttentionMultiStepBackend,
            )

            backend_cls = FlashAttentionMultiStepBackend
            self.has_prefill_wrapper_verify = False
        elif backend_name == "flashmla":
            from sglang.srt.layers.attention.flashmla_backend import (
                FlashMLAMultiStepDraftBackend,
            )

            backend_cls = FlashMLAMultiStepDraftBackend
            self.has_prefill_wrapper_verify = False
        else:
            raise ValueError(
                f"EAGLE is not supported in attention backend {self.server_args.attention_backend}"
            )

        # Common setup, previously duplicated in every branch.
        self.draft_attn_backend = backend_cls(
            self.draft_model_runner,
            self.topk,
            self.speculative_num_steps,
        )
        self.draft_extend_attn_backend = None
        self.padded_static_len = self.speculative_num_steps + 1

        self.draft_model_runner.draft_attn_backend = self.draft_attn_backend

    def init_cuda_graphs(self):
        """Capture the cuda graphs used by the draft model."""
        self.cuda_graph_runner = None
        self.cuda_graph_runner_for_draft_extend = None

        if self.server_args.disable_cuda_graph:
            return

        # Capture the draft decode graph, logging time and memory usage.
        start = time.perf_counter()
        mem_before = get_available_gpu_memory(self.device, self.gpu_id)
        logger.info(
            f"Capture draft cuda graph begin. This can take up to several minutes. avail mem={mem_before:.2f} GB"
        )
        self.cuda_graph_runner = EAGLEDraftCudaGraphRunner(self)
        mem_after = get_available_gpu_memory(self.device, self.gpu_id)
        logger.info(
            f"Capture draft cuda graph end. Time elapsed: {time.perf_counter() - start:.2f} s. avail mem={mem_after:.2f} GB. mem usage={(mem_before - mem_after):.2f} GB."
        )

        # Capturing a graph for the draft-extend path is not implemented.
        if self.draft_extend_attn_backend:
            raise NotImplementedError()

    @property
    def draft_model_runner(self):
        """Alias for this worker's own `model_runner` (the draft model's runner)."""
        return self.model_runner

    def forward_batch_speculative_generation(
        self, batch: ScheduleBatch
    ) -> Tuple[LogitsProcessorOutput, List[int], int, int, bool]:
        """Run speculative decoding forward.

        NOTE: Many states of the batch are modified as you go through. It is not
        guaranteed that the final output batch has the same state as the input.

        Args:
            batch: The batch to run forward. The state of the batch is modified as it runs.
        Returns:
            A tuple of the final logit output of the target model, next tokens accepted,
            the batch id (used for overlap schedule), the number of accepted tokens,
            and whether the batch could run with a cuda graph.
        """
        if batch.forward_mode.is_decode():
            if self.is_profiling:
                # Profiling: append (batch size, step latency, avg accept length)
                # for steady-state steps (less than 0.1 s since the last step).
                cur_time = time.time()

                if cur_time - self.last_step_time < 0.1 and batch.spec_info is not None and batch.spec_info.accept_length_cpu is not None:
                    accept_length_cpu =  batch.spec_info.accept_length_cpu
                    average_accept_length = sum(accept_length_cpu) / len(accept_length_cpu)
                    with open("/sglang/outputs/time_profiling.txt", "a") as f:
                        f.write(f"{batch.batch_size()},{cur_time - self.last_step_time},{average_accept_length}\n")
                self.last_step_time = cur_time
            self.cur_decoding_step += 1

            if self.test_thoughputs:
                # Throughput logging, tp rank 0 only.
                # NOTE(review): if `is_profiling` is also on, `last_step_time`
                # was already refreshed above, so the delta here is near zero —
                # confirm this interaction is intended.
                cur_time = time.time()
                if cur_time - self.last_step_time < 0.2 and batch.spec_info is not None and batch.spec_info.accept_length_cpu is not None and self.tp_rank == 0:
                    accept_length_cpu =  batch.spec_info.accept_length_cpu
                    num_generate_tokens = sum(accept_length_cpu) + batch.batch_size()
                    with open(f"/sglang/throughputs/outputs/throughputs_{self.dp_rank}_{self.test_batch}.txt", "a") as f:
                        f.write(f"{cur_time},{self.dp_rank},{batch.batch_size()},{self.server_args.speculative_num_draft_tokens},{cur_time - self.last_step_time},{num_generate_tokens}\n")
                self.last_step_time = cur_time

            # Draft under the draft model's own tp group, then verify with the
            # target model.
            with self.draft_tp_context(self.draft_model_runner.tp_group):
                spec_info = self.draft(batch)
            self.handle_migrate_phase3()
            logits_output, verify_output, model_worker_batch, can_run_cuda_graph = (
                self.verify(batch, spec_info)
            )

            # If it is None, it means all requests are finished
            if batch.spec_info.verified_id is not None:
                with self.draft_tp_context(self.draft_model_runner.tp_group):
                    self.forward_draft_extend_after_decode(batch)
            return (
                logits_output,
                verify_output.verified_id,
                model_worker_batch.bid,
                sum(verify_output.accept_length_per_req_cpu),
                can_run_cuda_graph,
            )
        elif batch.forward_mode.is_idle():
            # Idle batch: plain target forward, nothing speculative to do.
            model_worker_batch = batch.get_model_worker_batch()
            logits_output, next_token_ids, _ = (
                self.target_worker.forward_batch_generation(model_worker_batch)
            )

            return logits_output, next_token_ids, model_worker_batch.bid, 0, False
        else:
            # Extend (prefill): run the target extend, then prefill the draft
            # model's KV cache from the target's hidden states.
            self.test_batch += 1
            logits_output, next_token_ids, bid = self.forward_target_extend(batch)
            with self.draft_tp_context(self.draft_model_runner.tp_group):
                self.forward_draft_extend(
                    batch, logits_output.hidden_states, next_token_ids
                )
            return logits_output, next_token_ids, bid, 0, False

    def forward_target_extend(
        self, batch: ScheduleBatch
    ) -> Tuple[LogitsProcessorOutput, List[int], int]:
        """Run the target extend.

        Args:
            batch: The batch to run. States could be modified.

        Returns:
            logits_output: The output of logits. It will contain the full hidden states.
            next_token_ids: Next token ids generated.
            bid: The model batch ID. Used for overlap schedule.
        """
        worker_batch = batch.get_model_worker_batch()
        # Request the full hidden states: the draft model's KV cache is
        # prefilled from them later.
        worker_batch.capture_hidden_mode = CaptureHiddenMode.FULL
        logits_output, next_token_ids, _ = self.target_worker.forward_batch_generation(
            worker_batch
        )
        return logits_output, next_token_ids, worker_batch.bid

    def build_prediction_cache(self):
        """Build the spec predictor and warm it with a grid of inputs.

        A mesh over (seq_lens_sum, verified_token_num) is fed to
        `predict_batch` once so later per-step predictions hit a warm path.
        NOTE(review): the constants 846 and 362.1 filling the last two feature
        columns look like fixed workload parameters — confirm their meaning.
        """
        self.predictor = build_predictor()
        self.cached_data = {}
        verified_token_num = list(range(10, 321, 10))
        seq_lens_sum = list(range(100, 32000, 800))
        seq_lens_mesh, verified_token_mesh = np.meshgrid(seq_lens_sum, verified_token_num, indexing='ij')
        input_data_array = np.column_stack([
            seq_lens_mesh.flatten(),
            verified_token_mesh.flatten(),
            np.full(seq_lens_mesh.size, 846),
            np.full(seq_lens_mesh.size, 362.1)
        ])

        # Pass input_data_array to predict_batch to warm the predictor.
        self.predictor.predict_batch(input_data_array)

    def draft(self, batch: ScheduleBatch):
        """Run the multi-step draft forward for a decode batch.

        Allocates transient KV-cache slots for the draft tokens, runs the
        draft steps (replaying a cuda graph when possible), restores the
        allocator to its pre-draft state, and packs the drafted token tree
        into an `EagleVerifyInput`.

        Returns:
            EagleVerifyInput: Input for the subsequent target verify pass.
        """
        # Parse args
        #import time
        #torch.cuda.synchronize()
        #t1 = time.time()
        num_seqs = batch.batch_size()
        spec_info = batch.spec_info

        # Accumulate penalty
        if batch.sampling_info.penalizer_orchestrator.is_required:
            # This is a relaxed version of penalties for speculative decoding.
            batch.sampling_info.penalizer_orchestrator.cumulate_output_tokens(
                spec_info.verified_id.to(torch.int64)
            )

        # Allocate cache locations
        if self.page_size == 1:
            out_cache_loc, token_to_kv_pool_state_backup = batch.alloc_token_slots(
                num_seqs * self.topk * self.speculative_num_steps, backup_state=True
            )
        else:
            if self.topk == 1:
                prefix_lens = batch.seq_lens
                seq_lens = prefix_lens + self.speculative_num_steps
                extend_num_tokens = num_seqs * self.speculative_num_steps
            else:
                # In this case, the last partial page needs to be duplicated.
                # KV cache layout in batch.req_to_token_pool.req_to_token:
                #
                # | -------- | -- xxxx .. | -- xxxx .. | -- xxxx .. |
                #    prefix     top-k = 0    tok-k = 1    top-k = 2
                #
                #  "-" means prefix tokens
                #  "x" means speculative draft tokens
                #  "." means padded tokens

                # TODO: fuse these ops
                prefix_lens = batch.seq_lens
                last_page_lens = prefix_lens % self.page_size
                num_new_pages = (
                    last_page_lens + self.speculative_num_steps + self.page_size - 1
                ) // self.page_size
                seq_lens = (
                    prefix_lens // self.page_size * self.page_size
                    + num_new_pages * (self.page_size * self.topk)
                )
                extend_num_tokens = torch.sum(seq_lens - prefix_lens).item()
                raise NotImplementedError(
                    "page_size > 1 and top_k > 1 are not supported."
                )
                # TODO: Support page_size > 1 and top_k > 1
                # 1. Duplicate the KV cache in the last partial page for all top-k segments
                # 2. Modify generate_draft_decode_kv_indices accordingly

            last_loc = get_last_loc(
                batch.req_to_token_pool.req_to_token,
                batch.req_pool_indices,
                prefix_lens,
            )
            out_cache_loc, token_to_kv_pool_state_backup = (
                batch.alloc_paged_token_slots_extend(
                    prefix_lens,
                    seq_lens,
                    last_loc,
                    extend_num_tokens,
                    backup_state=True,
                )
            )

        # Write the draft slot locations into the req-to-token mapping.
        assign_draft_cache_locs[(num_seqs,)](
            batch.req_pool_indices,
            batch.req_to_token_pool.req_to_token,
            batch.seq_lens,
            out_cache_loc,
            batch.req_to_token_pool.req_to_token.shape[1],
            self.topk,
            self.speculative_num_steps,
            self.page_size,
        )
        batch.out_cache_loc = out_cache_loc
        batch.seq_lens_sum = torch.sum(batch.seq_lens).item()
        # One position per top-k branch per sequence.
        spec_info.positions = batch.seq_lens.repeat_interleave(self.topk, dim=0)

        # Get forward batch
        spec_info.capture_hidden_mode = CaptureHiddenMode.LAST
        model_worker_batch = batch.get_model_worker_batch()
        forward_batch = ForwardBatch.init_new(
            model_worker_batch, self.draft_model_runner
        )
        can_cuda_graph = self.cuda_graph_runner and self.cuda_graph_runner.can_run(
            forward_batch
        )
        #torch.cuda.synchronize()
        #t2 = time.time()
        if can_cuda_graph:
            score_list, token_list, parents_list = self.cuda_graph_runner.replay(
                forward_batch
            )
        else:
            # Initialize attention backend
            self.draft_attn_backend.init_forward_metadata(forward_batch)
            forward_batch = ForwardBatch.init_new(
                model_worker_batch, self.draft_model_runner
            )
            # Run forward steps
            score_list, token_list, parents_list = self.draft_forward(forward_batch)

        #torch.cuda.synchronize()
        #t2 = time.time()
        # Roll the allocator back to its pre-draft state; the draft slots were
        # allocated with backup_state=True above.
        self.token_to_kv_pool_allocator.restore_state(token_to_kv_pool_state_backup)

        #torch.cuda.synchronize()
        #t3 = time.time()
        ret = EagleVerifyInput.create(
            spec_info.verified_id,
            score_list,
            token_list,
            parents_list,
            batch.seq_lens,
            batch.seq_lens_sum,
            self.topk,
            self.speculative_num_steps,
            self.server_args.speculative_num_draft_tokens,
            self.predictor,
            self.server_args.enable_dynamic_spec,
        )
        #torch.cuda.synchronize()
        #t4 = time.time()
        #first_rank_print("setup:",t2-t1,flush=True)
        #first_rank_print("draft_forward:",t3-t2,flush=True)
        #first_rank_print("build_tree:",t4-t3,flush=True)
        return ret

    def draft_forward(self, forward_batch: ForwardBatch):
        """Run the draft model step by step, expanding the top-k token tree
        one level per step.

        Returns:
            (score_list, token_list, parents_list): per-step tree scores,
            token ids, and parent indices, as consumed by
            `EagleVerifyInput.create`.
        """
        # Parse args
        spec_info = forward_batch.spec_info
        out_cache_loc = forward_batch.out_cache_loc
        topk_p, topk_index, hidden_states = (
            spec_info.topk_p,
            spec_info.topk_index,
            spec_info.hidden_states,
        )
        # Map hot-vocab indices back to full-vocab token ids if applicable.
        if self.hot_token_id is not None:
            topk_index = self.hot_token_id[topk_index]

        # Return values
        score_list: List[torch.Tensor] = []
        token_list: List[torch.Tensor] = []
        parents_list: List[torch.Tensor] = []

        # Forward multiple steps
        scores = None
        for i in range(self.speculative_num_steps):
            input_ids, hidden_states, scores, tree_info = select_top_k_tokens(
                i, topk_p, topk_index, hidden_states, scores, self.topk
            )
            score_list.append(tree_info[0])
            token_list.append(tree_info[1])
            parents_list.append(tree_info[2])

            # We don't need to run the last forward. we get 1 token from draft prefill and (#spec steps - 1) tokens here
            if i == self.speculative_num_steps - 1:
                break

            # Set inputs
            forward_batch.input_ids = input_ids
            # Each step writes to its own slice of the per-sequence cache slots.
            out_cache_loc = out_cache_loc.view(forward_batch.batch_size, -1)
            forward_batch.out_cache_loc = out_cache_loc[
                :, self.topk * i : self.topk * (i + 1)
            ].flatten()
            forward_batch.positions.add_(1)
            forward_batch.attn_backend = self.draft_attn_backend.attn_backends[i]
            spec_info.hidden_states = hidden_states

            # Run forward
            logits_output = self.draft_model_runner.model.forward(
                forward_batch.input_ids, forward_batch.positions, forward_batch
            )
            self._detect_nan_if_needed(logits_output)
            probs = torch.softmax(logits_output.next_token_logits, dim=-1)
            topk_p, topk_index = fast_topk(probs, self.topk, dim=-1)
            if self.hot_token_id is not None:
                topk_index = self.hot_token_id[topk_index]
            hidden_states = logits_output.hidden_states
        # Calibrate the draft scores with a cubic polynomial.
        # NOTE(review): the coefficients look like an offline empirical fit
        # (an older linear fit is left commented below) — confirm their source.
        for i, score in enumerate(score_list):
            score_list[i] = 0.197857972*score**3 - 0.564602284*score**2 + 1.36081359*score+0.000578697522
            #score_list[i] = 0.97408886 * score + 0.0551778
        return score_list, token_list, parents_list

    def verify(self, batch: ScheduleBatch, spec_info: EagleVerifyInput):
        """Verify the drafted token tree with the target model.

        Runs the target model in TARGET_VERIFY mode, applies a grammar
        bitmask when structured output is in use, and delegates token
        acceptance to `spec_info.verify`. On return, `batch` is switched back
        to DECODE mode with `res.draft_input` as its spec info.

        Returns:
            (logits_output, res, model_worker_batch, can_run_cuda_graph)
        """
        spec_info.prepare_for_verify(batch, self.page_size)
        batch.forward_mode = ForwardMode.TARGET_VERIFY
        batch.spec_info = spec_info
        model_worker_batch = batch.get_model_worker_batch()

        # Snapshot the tree structure on CPU before the forward so bitmask
        # generation below can overlap with the GPU forward pass.
        if batch.has_grammar:
            retrieve_next_token_cpu = spec_info.retrive_next_token.cpu()
            retrieve_next_sibling_cpu = spec_info.retrive_next_sibling.cpu()
            draft_tokens_cpu = spec_info.draft_token.view(
                spec_info.retrive_next_token.shape
            ).cpu()

        # Forward
        logits_output, _, can_run_cuda_graph = (
            self.target_worker.forward_batch_generation(
                model_worker_batch, skip_sample=True
            )
        )

        vocab_mask = None
        if batch.has_grammar:
            # Generate the logit mask for structured output.
            # Overlap the CPU operations for bitmask generation with the forward pass.
            vocab_mask = generate_token_bitmask(
                batch.reqs,
                spec_info,
                retrieve_next_token_cpu,
                retrieve_next_sibling_cpu,
                draft_tokens_cpu,
                batch.sampling_info.vocab_size,
            )

            if vocab_mask is not None:
                assert spec_info.grammar is not None
                vocab_mask = vocab_mask.to(spec_info.retrive_next_token.device)
                # otherwise, this vocab mask will be the one from the previous extend stage
                # and will be applied to produce wrong results
                batch.sampling_info.vocab_mask = None

        self._detect_nan_if_needed(logits_output)
        spec_info.hidden_states = logits_output.hidden_states
        res: EagleVerifyOutput = spec_info.verify(
            batch,
            logits_output,
            self.token_to_kv_pool_allocator,
            self.page_size,
            vocab_mask,
        )

        # Post process based on verified outputs.
        # Pick indices that we care (accepted)
        logits_output.next_token_logits = logits_output.next_token_logits[
            res.accepted_indices
        ]
        logits_output.hidden_states = logits_output.hidden_states[res.accepted_indices]

        # Prepare the batch for the next draft forwards.
        batch.forward_mode = ForwardMode.DECODE
        batch.spec_info = res.draft_input

        if batch.return_logprob:
            self.add_logprob_values(batch, res, logits_output)

        return logits_output, res, model_worker_batch, can_run_cuda_graph

    def add_logprob_values(
        self,
        batch: ScheduleBatch,
        res: EagleVerifyOutput,
        logits_output: LogitsProcessorOutput,
    ):
        """Compute logprobs for every accepted token and attach them to the
        requests in `batch`.

        NOTE(review): the `logits_output` parameter is immediately rebound to
        `res.logits_output` below, so the passed-in argument is unused —
        confirm whether the parameter can be dropped or should be used.
        """
        # Extract args
        logits_output = res.logits_output
        top_logprobs_nums = batch.top_logprobs_nums
        token_ids_logprobs = batch.token_ids_logprobs
        logprobs = torch.nn.functional.log_softmax(
            logits_output.next_token_logits, dim=-1
        )
        batch_next_token_ids = res.verified_id
        # Each request contributes its accepted tokens plus one bonus token.
        num_tokens_per_req = [accept + 1 for accept in res.accept_length_per_req_cpu]

        # We should repeat top_logprobs_nums to match num_tokens_per_req.
        top_logprobs_nums_repeat_interleaved = []
        token_ids_logprobs_repeat_interleaved = []
        for num, num_tokens in zip(top_logprobs_nums, num_tokens_per_req):
            top_logprobs_nums_repeat_interleaved.extend([num] * num_tokens)
        for token_ids, num_tokens in zip(token_ids_logprobs, num_tokens_per_req):
            token_ids_logprobs_repeat_interleaved.extend([token_ids] * num_tokens)

        # Extract logprobs
        if any(x > 0 for x in top_logprobs_nums):
            (
                logits_output.next_token_top_logprobs_val,
                logits_output.next_token_top_logprobs_idx,
            ) = get_top_logprobs(logprobs, top_logprobs_nums_repeat_interleaved)

        if any(x is not None for x in token_ids_logprobs):
            (
                logits_output.next_token_token_ids_logprobs_val,
                logits_output.next_token_token_ids_logprobs_idx,
            ) = get_token_ids_logprobs(logprobs, token_ids_logprobs_repeat_interleaved)

        logits_output.next_token_logprobs = logprobs[
            torch.arange(len(batch_next_token_ids), device=batch.sampling_info.device),
            batch_next_token_ids,
        ]

        # Add output logprobs to the request
        pt = 0  # flat cursor over all requests' accepted tokens
        next_token_logprobs = logits_output.next_token_logprobs.tolist()
        verified_ids = batch_next_token_ids.tolist()
        for req, num_tokens in zip(batch.reqs, num_tokens_per_req):
            for _ in range(num_tokens):
                if req.return_logprob:
                    req.output_token_logprobs_val.append(next_token_logprobs[pt])
                    req.output_token_logprobs_idx.append(verified_ids[pt])
                    if req.top_logprobs_num > 0:
                        req.output_top_logprobs_val.append(
                            res.logits_output.next_token_top_logprobs_val[pt]
                        )
                        req.output_top_logprobs_idx.append(
                            res.logits_output.next_token_top_logprobs_idx[pt]
                        )
                pt += 1
    def forward_draft_extend(
        self,
        batch: ScheduleBatch,
        hidden_states: torch.Tensor,
        next_token_ids: List[int],
    ):
        """Run draft model extend. This API modifies the states of the batch.

        Args:
            batch: The batch to run.
            hidden_states: Hidden states from the target model forward.
            next_token_ids: Next token ids generated from the target forward.
        """
        draft_input = EagleDraftInput(
            hidden_states=hidden_states,
            verified_id=next_token_ids,
        )
        batch.spec_info = draft_input
        draft_input.prepare_for_extend(batch)
        draft_input.capture_hidden_mode = CaptureHiddenMode.LAST

        worker_batch = batch.get_model_worker_batch()
        fwd_batch = ForwardBatch.init_new(worker_batch, self.draft_model_runner)
        fwd_batch.return_logprob = False

        logits_output, _ = self.draft_model_runner.forward(fwd_batch)
        self._detect_nan_if_needed(logits_output)

        # The forward batch must still carry the exact spec info we installed.
        assert isinstance(fwd_batch.spec_info, EagleDraftInput)
        assert fwd_batch.spec_info is batch.spec_info
        self.capture_for_decode(logits_output, fwd_batch.spec_info)

    def forward_draft_extend_after_decode(self, batch: ScheduleBatch):
        """Run a draft-model extend right after decode, then restore batch state.

        `prepare_extend_after_decode` mutates several batch fields in place,
        so they are snapshotted first and restored at the end to leave the
        batch looking like a DECODE batch again.
        """
        # Snapshot the fields that will be modified in place below.
        saved_seq_lens = batch.seq_lens.clone()
        saved_req_pool_indices = batch.req_pool_indices
        saved_accept_length = batch.spec_info.accept_length
        saved_return_logprob = batch.return_logprob

        # Switch the batch into draft-extend mode and build forward metadata.
        batch.forward_mode = ForwardMode.DRAFT_EXTEND
        batch.spec_info.prepare_extend_after_decode(
            batch,
            self.speculative_num_steps,
        )
        batch.spec_info.capture_hidden_mode = CaptureHiddenMode.LAST
        batch.return_logprob = False
        fwd_batch = ForwardBatch.init_new(
            batch.get_model_worker_batch(), self.draft_model_runner
        )

        # Run the draft model and capture its top-k proposals.
        logits_out, _ = self.draft_model_runner.forward(fwd_batch)
        self._detect_nan_if_needed(logits_out)
        self.capture_for_decode(logits_out, fwd_batch.spec_info)

        # Undo the in-place modifications (e.g. `seq_lens` changed inside
        # `prepare_extend_after_decode`).
        batch.forward_mode = ForwardMode.DECODE
        batch.seq_lens = saved_seq_lens
        batch.req_pool_indices = saved_req_pool_indices
        batch.spec_info.accept_length = saved_accept_length
        batch.return_logprob = saved_return_logprob

    def capture_for_decode(
        self, logits_output: LogitsProcessorOutput, draft_input: EagleDraftInput
    ):
        """Store the draft model's top-k next-token proposals on `draft_input`."""
        token_probs = torch.softmax(logits_output.next_token_logits, dim=-1)
        topk_p, topk_index = fast_topk(token_probs, self.topk, dim=-1)
        draft_input.topk_p = topk_p
        draft_input.topk_index = topk_index
        draft_input.hidden_states = logits_output.hidden_states

    def _detect_nan_if_needed(self, logits_output: LogitsProcessorOutput):
        """Raise if NaNs appear in the next-token logits (opt-in via flag)."""
        if not self.enable_nan_detection:
            return
        if torch.isnan(logits_output.next_token_logits).any():
            logger.error("Detected errors during sampling! NaN in the logits.")
            raise ValueError("Detected errors during sampling! NaN in the logits.")

    def send_req_migrate(self, batch: ScheduleBatch, 
                         migration_indices: List[int], 
                         tree_cache: RadixCache,
                         phase : MigrationPhase,
                         start_idx : Optional[torch.Tensor] = None,
                         send_rids : Optional[List[str]] = None) -> Tuple[BatchSpecReqMigrationInfo, torch.Tensor, torch.Tensor]:
        """Pack the KV cache (and spec-decode state) of selected requests for migration.

        Payload layout in the flat `concat_tensor`: for each target layer K then V,
        then for each draft layer K then V, then (STAGE2 only) the hidden states
        of the migrated requests.

        STAGE1 packs tokens [0, seq_len) per request and leaves `batch` untouched.
        STAGE2 packs only the delta tokens [start_idx, seq_len) plus topk_p /
        topk_index / verified_id / hidden_states, and removes the migrated
        requests from `batch` (reqs, seq_lens, input_ids, spec_info,
        sampling_info and per-request lists are filtered in place).

        Args:
            batch: The batch holding the candidate requests.
            migration_indices: Indices into `batch.reqs` of requests to migrate.
            tree_cache: Radix cache; migrated requests are released from it (STAGE2).
            phase: Which migration stage this call performs.
            start_idx: Per-request token counts already migrated in STAGE1
                (required for STAGE2).
            send_rids: rids sent during STAGE1 (required for STAGE2; used to
                re-locate requests whose batch index has changed since).

        Returns:
            (metadata, payload_a, payload_b). In STAGE2, payload_a is the
            draft-KV + hidden-state chunk and payload_b the target-KV chunk.
            NOTE(review): in non-STAGE2 phases the third element is the int
            ``0`` rather than a tensor, despite the annotation — confirm callers.
        """

        if phase == MigrationPhase.STAGE2:
            assert start_idx is not None and send_rids is not None
            # --- Filtering Logic for STAGE2 ---
            # 1. Get the rids of requests currently in the batch.
            current_rids_in_batch = {req.rid: i for i, req in enumerate(batch.reqs)}
            # 2. Find which of the original STAGE1 requests still exist and get their current indices.
            final_migration_indices = []
            final_start_idx_list = []
            for i, original_rid in enumerate(send_rids):
                if original_rid in current_rids_in_batch:
                    # The request still exists, find its *new* index in the batch.
                    new_batch_idx = current_rids_in_batch[original_rid]
                    final_migration_indices.append(new_batch_idx)
                    # Keep the corresponding start_idx from the original STAGE1 lengths tensor.
                    final_start_idx_list.append(start_idx[i].item())
                
            # 3. Update the variables to use the filtered lists.
            migration_indices = final_migration_indices
            start_idx = torch.tensor(final_start_idx_list, device=start_idx.device, dtype=start_idx.dtype)        

        # Nothing to migrate: return empty metadata with a 1-element placeholder.
        # NOTE(review): on the STAGE1 path start_idx is still None here, so
        # kvcache_lens is None — confirm receivers tolerate that.
        if len(migration_indices) == 0:
            res = BatchSpecReqMigrationInfo(
                phase=phase,
                kvcache_lens=start_idx,
                requests=[]
            )
            empty_tensor = torch.tensor([0],device=self.device)
            return res, empty_tensor, empty_tensor

        # Deduplicate and sort so tensor indexing below is deterministic.
        migration_indices = sorted(list(set(migration_indices)))
        
        migration_indices_tensor = torch.tensor(migration_indices)
        
        # --- 1. Gather information and KV cache for requests to be migrated ---
        
        num_draft_layers = self.model_runner.model_config.num_hidden_layers
        num_target_layers = self.target_worker.model_runner.model_config.num_hidden_layers

        # Per-layer physical K/V buffers for both the draft and target models.
        draft_physical_k_buffers_all_layers = [self.model_runner.token_to_kv_pool.get_key_buffer(l) for l in range(num_draft_layers)]
        draft_physical_v_buffers_all_layers = [self.model_runner.token_to_kv_pool.get_value_buffer(l) for l in range(num_draft_layers)]
        target_physical_k_buffers_all_layers = [self.target_worker.model_runner.token_to_kv_pool.get_key_buffer(l) for l in range(num_target_layers)]
        target_physical_v_buffers_all_layers = [self.target_worker.model_runner.token_to_kv_pool.get_value_buffer(l) for l in range(num_target_layers)]
        
        
        req_pool_indices_migrating = batch.req_pool_indices[migration_indices_tensor].clone()    # (bs)
     
        seq_lens_migrating = batch.seq_lens[migration_indices_tensor].clone()        # (bs)


        num_migrating_reqs = migration_indices_tensor.numel()
        if phase == MigrationPhase.STAGE1:
            # STAGE1 sends from token 0 for every request.
            start_idx = torch.zeros(num_migrating_reqs, device=self.device, dtype=torch.long)
        else:
            assert start_idx is not None, "start_idx must be provided for delta migration phases."
            start_idx = start_idx.to(device=self.device, dtype=torch.long)

        # Shape: [num_migrating_reqs, max_context_len_in_pool]
        all_possible_slots = self.req_to_token_pool.req_to_token[req_pool_indices_migrating]

        max_len_migrating = torch.max(seq_lens_migrating).item() if num_migrating_reqs > 0 else 0

        arange_tensor = torch.arange(max_len_migrating, device=self.device)

        # Mask out tokens already sent (below start_idx) and padding (>= seq_len).
        lower_bound_mask = arange_tensor >= start_idx.unsqueeze(1)
        upper_bound_mask = arange_tensor < seq_lens_migrating.unsqueeze(1)
        
        token_mask = lower_bound_mask & upper_bound_mask

        valid_slots_padded = all_possible_slots[:, :max_len_migrating]

        # Flat list of physical KV slot ids to copy out, request-major order.
        physical_slots_flat = valid_slots_padded[token_mask]

        spec_info = batch.spec_info
        hidden_states = spec_info.hidden_states
        hidden_state_numel_per_req = hidden_states[0].numel()
        
        # Size the flat payload buffer per phase.
        # NOTE(review): concat_tensor_size is only assigned for STAGE1/STAGE2;
        # any other phase would hit UnboundLocalError below — confirm callers
        # never pass a different phase here.
        if phase == MigrationPhase.STAGE1:
            concat_tensor_size = physical_slots_flat.numel() * self.kv_numel_per_token


        elif phase == MigrationPhase.STAGE2:
            # Phase-2 chunk: draft KV + per-request hidden states.
            concat_tensor_size_phase2 = physical_slots_flat.numel() * self.draft_kv_numel_per_token \
                                + len(migration_indices) * hidden_state_numel_per_req 

            # Phase-3 chunk: target KV only.
            concat_tensor_size_phase3 = physical_slots_flat.numel() * self.target_kv_numel_per_token  
            
            concat_tensor_size = concat_tensor_size_phase2 + concat_tensor_size_phase3

        concat_tensor = torch.empty(size=(concat_tensor_size,),
                                    dtype=self.model_runner.dtype,
                                    device=self.device)
        
        cur_numel_offset = 0

        # Pack target-model KV: K then V, per layer, gathered by physical slot.
        for layer_idx in range(num_target_layers):
            k_physical_buffer = target_physical_k_buffers_all_layers[layer_idx]
            v_physical_buffer = target_physical_v_buffers_all_layers[layer_idx]
            
            # shape: [sum_seq_len, num_kv_heads, head_dim]
            k_for_layer_req = torch.index_select(k_physical_buffer, 0, physical_slots_flat)
            tensor_numel = k_for_layer_req.numel()
            concat_tensor[cur_numel_offset : cur_numel_offset + tensor_numel].copy_(k_for_layer_req.view(-1), non_blocking=True)
            cur_numel_offset += tensor_numel
            
            v_for_layer_req = torch.index_select(v_physical_buffer, 0, physical_slots_flat)
            tensor_numel = v_for_layer_req.numel()
            concat_tensor[cur_numel_offset : cur_numel_offset + tensor_numel].copy_(v_for_layer_req.view(-1), non_blocking=True)
            cur_numel_offset += tensor_numel
    
        
        # Pack draft-model KV with the same layout.
        for layer_idx in range(num_draft_layers):
            k_physical_buffer = draft_physical_k_buffers_all_layers[layer_idx]
            v_physical_buffer = draft_physical_v_buffers_all_layers[layer_idx]
            
            # shape: [sum_seq_len, num_kv_heads, head_dim]
            k_for_layer_req = torch.index_select(k_physical_buffer, 0, physical_slots_flat)
            tensor_numel = k_for_layer_req.numel()
            concat_tensor[cur_numel_offset : cur_numel_offset + tensor_numel].copy_(k_for_layer_req.view(-1), non_blocking=True)
            cur_numel_offset += tensor_numel
            
            v_for_layer_req = torch.index_select(v_physical_buffer, 0, physical_slots_flat)
            tensor_numel = v_for_layer_req.numel()
            concat_tensor[cur_numel_offset : cur_numel_offset + tensor_numel].copy_(v_for_layer_req.view(-1), non_blocking=True)
            cur_numel_offset += tensor_numel

        if phase == MigrationPhase.STAGE2:
            # Tail of the payload: hidden states of the migrated requests.
            concat_tensor[cur_numel_offset:].copy_(hidden_states[migration_indices_tensor].view(-1), non_blocking=True)
            cur_numel_offset = concat_tensor.numel()
        
        # All copies above are non_blocking; fence before the payload leaves.
        torch.cuda.synchronize()
        assert cur_numel_offset == concat_tensor.numel()

        if phase != MigrationPhase.STAGE2:
            res = BatchSpecReqMigrationInfo(
                phase=phase,
                kvcache_lens=seq_lens_migrating
            )
            rids = []
            for i in migration_indices:
                rids.append(batch.reqs[i].rid)
            res.rids = rids
            # NOTE(review): returns int 0 as the third element (see docstring).
            return res, concat_tensor, 0
        else:
            # STAGE2: release migrated requests from the local cache and strip
            # non-serializable references before shipping the Req objects.
            reqs_to_migrate: List[Req] = [batch.reqs[i] for i in migration_indices]    # (bs)
            for req in reqs_to_migrate:
                tree_cache.cache_finished_req(req)
                req.tokenizer = None
                req.last_node = None
                req.prefix_indices = None

            topk_p_migrating = spec_info.topk_p[migration_indices_tensor].clone()
            topk_index_migrating = spec_info.topk_index[migration_indices_tensor].clone()
            verified_id_migrating = spec_info.verified_id[migration_indices_tensor].clone()
            extend_lens_migrating = [batch.extend_lens[i] for i in migration_indices]
            
            res = BatchSpecReqMigrationInfo(
                phase=phase,
                kvcache_lens=seq_lens_migrating-start_idx,
                requests=reqs_to_migrate,
                topk_p = topk_p_migrating,
                topk_index=topk_index_migrating,
                verified_id=verified_id_migrating,
                extend_lens=extend_lens_migrating,
                prefix_lens=batch.prefix_lens,
                extend_logprob_start_lens=batch.extend_logprob_start_lens
            )
            
            # Remove the migrated requests from the local batch, keeping the rest.
            num_original_reqs = len(batch.reqs)
            keep_mask = [True] * num_original_reqs
            for batch_idx in migration_indices:
                if 0 <= batch_idx < num_original_reqs:
                    keep_mask[batch_idx] = False
            
            indices_to_keep = [i for i, keep in enumerate(keep_mask) if keep]


            indices_to_keep_tensor = torch.tensor(indices_to_keep, device=self.device, dtype=torch.long)
            batch.reqs = [batch.reqs[i] for i in indices_to_keep]
            
            if batch.req_pool_indices is not None:
                batch.req_pool_indices = batch.req_pool_indices[indices_to_keep_tensor]
            if batch.seq_lens is not None:
                batch.seq_lens = batch.seq_lens[indices_to_keep_tensor]
            
            batch.input_ids = batch.input_ids[indices_to_keep_tensor]
            batch.seq_lens_sum = torch.sum(batch.seq_lens).item()


            # Keep the speculative-decode state aligned with the surviving reqs.
            batch.spec_info.topk_p = batch.spec_info.topk_p[indices_to_keep_tensor]
            batch.spec_info.topk_index = batch.spec_info.topk_index[indices_to_keep_tensor]
            batch.spec_info.verified_id = batch.spec_info.verified_id[indices_to_keep_tensor]
            batch.spec_info.hidden_states = batch.spec_info.hidden_states[indices_to_keep_tensor]

            # Filter other per-request lists/tensors in ScheduleBatch if they exist and are populated
            if batch.prefix_lens is not None and len(batch.prefix_lens) == num_original_reqs:
                batch.prefix_lens = [batch.prefix_lens[i] for i in indices_to_keep]
            if batch.extend_lens is not None and len(batch.extend_lens) == num_original_reqs:
                batch.extend_lens = [batch.extend_lens[i] for i in indices_to_keep]
            if batch.extend_logprob_start_lens is not None and len(batch.extend_logprob_start_lens) == num_original_reqs:
                batch.extend_logprob_start_lens = [batch.extend_logprob_start_lens[i] for i in indices_to_keep]
            
            # Sampling parameters are per-request tensors; filter them too.
            batch.sampling_info.temperatures = batch.sampling_info.temperatures[indices_to_keep_tensor]
            batch.sampling_info.top_ps = batch.sampling_info.top_ps[indices_to_keep_tensor]
            batch.sampling_info.top_ks = batch.sampling_info.top_ks[indices_to_keep_tensor]
            batch.sampling_info.min_ps = batch.sampling_info.min_ps[indices_to_keep_tensor]


            # Split the payload: phase-3 (target KV) was packed first, so the
            # draft-KV + hidden-state chunk is the suffix.
            return res, concat_tensor[concat_tensor_size_phase3:] , concat_tensor[:concat_tensor_size_phase3]
    

    def recv_req_migrate(
        self,
        batch: ScheduleBatch,
        res1: BatchSpecReqMigrationInfo,
        concat_tensor1: torch.Tensor,
        res2: BatchSpecReqMigrationInfo,
        concat_tensor2: torch.Tensor,
        tree_cache: RadixCache,
        device: str,
        return_batch: bool
    ):
        """
        Receives two-phase migration data, and directly copies the bulk and delta
        KV caches into newly allocated physical slots. This version is refactored
        for improved readability and logical flow.

        Args:
            batch: Existing batch to merge into; may be falsy, in which case a
                fresh ScheduleBatch is created.
            res1: STAGE1 (bulk) metadata; its kvcache_lens give per-request
                token counts for concat_tensor1.
            concat_tensor1: Flat STAGE1 payload (target + draft KV, layer-interleaved).
            res2: STAGE2 (delta) metadata, including the migrated Req objects
                and spec-decode state.
            concat_tensor2: Flat STAGE2 payload (draft KV + hidden states).
            tree_cache: Radix cache the received requests are registered into.
            device: Device the metadata/payload tensors are moved to.
            return_batch: Whether to return the (possibly newly created) batch.

        Returns:
            (new_req_pool_indices, new_physical_kv_slots_flat, batch-or-None),
            or (None, None, None) when res2 carries no requests. The first two
            values are later consumed by `recv_req_migrate_phase3`.
        """
        # NOTE(review): hardcoded debug log path; this raises if the directory
        # does not exist. Consider gating behind a debug flag.
        log_file = "/sglang/outputs/recv_req_migrate.txt"
        with open(log_file, "a") as f:
            f.write("--- New Migration Receive ---\n")
        
        # === Phase 0: Initial Setup & Device Placement ===
        torch.cuda.synchronize()
        t_start = time.time()

        if len(res2.requests) == 0:
            return None, None, None
        # Ensure all incoming metadata tensors are on the correct device
        res1.kvcache_lens = res1.kvcache_lens.to(device)
        res2.kvcache_lens = res2.kvcache_lens.to(device)
        res2.topk_p = res2.topk_p.to(device)
        res2.topk_index = res2.topk_index.to(device)
        res2.verified_id = res2.verified_id.to(device)
        
        # The large data tensors also need to be on the correct device
        concat_tensor1 = concat_tensor1.to(device)
        concat_tensor2 = concat_tensor2.to(device)
        
        torch.cuda.synchronize()
        t_setup_done = time.time()
        with open(log_file, "a") as f:
            f.write(f"Phase 0 (Setup & Move): {t_setup_done - t_start:.6f}s\n")

        # Helper function to unpack a tensor with the CORRECTED layer-interleaved logic
        def _unpack_tensor(res_info: BatchSpecReqMigrationInfo, concat_tensor: torch.Tensor) -> Dict[str, Any]:
            # Layout mirrors send_req_migrate: target K/V per layer, then
            # draft K/V per layer, then (if present) hidden states.
            num_reqs = len(res_info.requests) if res_info.requests is not None else len(res_info.kvcache_lens)
            if num_reqs == 0:
                return {
                    "kv_cache_layers": {
                        "target_k_cache_layers": [], "target_v_cache_layers": [],
                        "draft_k_cache_layers": [], "draft_v_cache_layers": []
                    },
                    "hidden_states": None
                }

            # Use the correct, per-layer, per-token sizes you defined.
            target_numel_per_layer_token = self.target_numel_per_layer
            draft_numel_per_layer_token = self.draft_numel_per_layer

            kvcache_lens = res_info.kvcache_lens
            num_tokens_to_unpack = torch.sum(kvcache_lens).item()
            

            unpacked_kv_layers = {
                "target_k_cache_layers": [], "target_v_cache_layers": [],
                "draft_k_cache_layers": [], "draft_v_cache_layers": []
            }
            cur_offset = 0
            
            # --- Unpack Target Model (K and V interleaved per layer) ---
            num_target_layers = self.num_target_layers
            numel_per_layer_all_tokens_target = num_tokens_to_unpack * target_numel_per_layer_token
            # STAGE2 payloads carry no target KV here (it arrives in phase 3).
            if res_info.phase != MigrationPhase.STAGE2:
                for _ in range(num_target_layers):
                    # Unpack K for this layer
                    k_data_flat = concat_tensor[cur_offset : cur_offset + numel_per_layer_all_tokens_target]
                    unpacked_kv_layers["target_k_cache_layers"].append(k_data_flat.view(num_tokens_to_unpack, -1))
                    cur_offset += numel_per_layer_all_tokens_target
                    
                    # Unpack V for this layer
                    v_data_flat = concat_tensor[cur_offset : cur_offset + numel_per_layer_all_tokens_target]
                    unpacked_kv_layers["target_v_cache_layers"].append(v_data_flat.view(num_tokens_to_unpack, -1))
                    cur_offset += numel_per_layer_all_tokens_target

            # --- Unpack Draft Model (K and V interleaved per layer) ---
            num_draft_layers = self.num_draft_layers
            numel_per_layer_all_tokens_draft = num_tokens_to_unpack * draft_numel_per_layer_token
            for _ in range(num_draft_layers):
                # Unpack K for this layer
                k_data_flat = concat_tensor[cur_offset : cur_offset + numel_per_layer_all_tokens_draft]
                unpacked_kv_layers["draft_k_cache_layers"].append(k_data_flat.view(num_tokens_to_unpack, -1))
                cur_offset += numel_per_layer_all_tokens_draft
                
                # Unpack V for this layer
                v_data_flat = concat_tensor[cur_offset : cur_offset + numel_per_layer_all_tokens_draft]
                unpacked_kv_layers["draft_v_cache_layers"].append(v_data_flat.view(num_tokens_to_unpack, -1))
                cur_offset += numel_per_layer_all_tokens_draft
                    
            # Unpack Hidden States
            hidden_states = None
            if cur_offset < concat_tensor.numel():
                hidden_states_flat = concat_tensor[cur_offset:]
                hidden_dim = hidden_states_flat.numel() // num_reqs
                hidden_states = hidden_states_flat.view(num_reqs, hidden_dim)

            return {"kv_cache_layers": unpacked_kv_layers, "hidden_states": hidden_states}
        
        unpacked_p1 = _unpack_tensor(res1, concat_tensor1)
        unpacked_p2 = _unpack_tensor(res2, concat_tensor2)
        
        num_migrated_reqs = len(res2.requests)


        # === Phase 1b: Survivor filtering ===
        # Some requests sent in STAGE1 may have finished before STAGE2; keep
        # only the STAGE1 slices whose rid still appears in res2.
        if res1.kvcache_lens.numel() != res2.kvcache_lens.numel():
            with open("/sglang/outputs/test_rid_survivors.txt", "a") as f:
                f.write("survivors test has happened\n")
            #filter the survivors
            original_rids_map = {rid: i for i, rid in enumerate(res1.rids)}
            filtered_kvcache_lens_p1_list = []
            # This will store the token slices we need to keep from the unpacked_p1 tensors.
            p1_token_slices_to_keep = []
            # Pre-calculate token offsets for the original, unfiltered STAGE1 data
            original_p1_token_offsets = torch.cat([
                torch.tensor([0], device=self.device),
                torch.cumsum(res1.kvcache_lens, 0)
            ])
            
            res2_rids = [res2.requests[i].rid for i in range(len(res2.requests))]

            # 3. Iterate through the SURVIVING requests (from res2) to determine what to keep from res1.
            for rid_survivor in res2_rids:

                # Find the original index of this survivor in the STAGE1 data
                original_idx = original_rids_map[rid_survivor]

                # Keep its original kvcache_len
                filtered_kvcache_lens_p1_list.append(res1.kvcache_lens[original_idx])

                # Determine the token slice to extract from the flat unpacked_p1 tensors
                start = original_p1_token_offsets[original_idx].item()
                end = original_p1_token_offsets[original_idx + 1].item()
                p1_token_slices_to_keep.append(slice(start, end))

            filtered_res1_kvcache_lens = torch.stack(filtered_kvcache_lens_p1_list)

            # Create the new, filtered unpacked_p1 data by applying the collected slices
            filtered_unpacked_p1 = {"kv_cache_layers": {}}
            for key, layer_list in unpacked_p1["kv_cache_layers"].items():
                # For each layer, concatenate the slices from all surviving requests
                filtered_layers = [
                    torch.cat([layer_data[s] for s in p1_token_slices_to_keep])
                    for layer_data in layer_list
                ]
                filtered_unpacked_p1["kv_cache_layers"][key] = filtered_layers

            res1.kvcache_lens = filtered_res1_kvcache_lens
            unpacked_p1 = filtered_unpacked_p1


        # === Phase 2: Allocate Resources ===
        # Each request needs bulk (STAGE1) + delta (STAGE2) KV slots.
        final_kv_lens = res1.kvcache_lens + res2.kvcache_lens
        total_kv_slots_needed = torch.sum(final_kv_lens).item()
        
        new_req_pool_indices_list = self.req_to_token_pool.alloc(num_migrated_reqs)
        assert new_req_pool_indices_list is not None, "OOM in request pool."
        new_req_pool_indices = torch.tensor(new_req_pool_indices_list, device=self.device, dtype=torch.int32)

        new_physical_kv_slots_flat = torch.empty((0,), dtype=torch.int32, device=self.device)
        if total_kv_slots_needed > 0:
            new_physical_kv_slots_flat = self.token_to_kv_pool_allocator.alloc(total_kv_slots_needed)
            assert new_physical_kv_slots_flat is not None, "OOM in KV cache pool."

        torch.cuda.synchronize()
        t_alloc_done = time.time()
        with open(log_file, "a") as f:
            f.write(f"Phase 1-2 (Unpack & Alloc): {t_alloc_done - t_setup_done:.6f}s\n")
            
        # === Phase 3: Prepare for GPU Operations (Build Scatter Indices) ===
        pre_zero_tensor = torch.tensor([0], device=final_kv_lens.device, dtype=final_kv_lens.dtype)
        final_token_offsets = torch.cat([pre_zero_tensor, torch.cumsum(final_kv_lens, 0)])

        # Per-request destination slots: first p1_len go to the STAGE1 data,
        # the remainder to the STAGE2 delta.
        p1_scatter_indices_list, p2_scatter_indices_list = [], []
        for i in range(num_migrated_reqs):
            p1_len = res1.kvcache_lens[i].item()
            dest_slots_full = new_physical_kv_slots_flat[final_token_offsets[i] : final_token_offsets[i+1]]
            
            p1_scatter_indices_list.append(dest_slots_full[:p1_len])
            p2_scatter_indices_list.append(dest_slots_full[p1_len:])
            
            self.req_to_token_pool.req_to_token[new_req_pool_indices[i], :final_kv_lens[i]] = dest_slots_full

        p1_scatter_indices = torch.cat(p1_scatter_indices_list)
        p2_scatter_indices = torch.cat(p2_scatter_indices_list)

        # === Phase 4: Execute GPU Operations (KV Cache Restore) ===
        draft_model_config = self.model_runner.model_config
        target_model_config = self.target_worker.model_runner.model_config
        # KV heads are sharded across tensor-parallel ranks.
        target_num_kv_heads, target_head_dim = target_model_config.num_key_value_heads, target_model_config.head_dim
        target_num_kv_heads = target_num_kv_heads // self.tp_size
        draft_num_kv_heads, draft_head_dim = draft_model_config.num_key_value_heads, draft_model_config.head_dim
        draft_num_kv_heads = draft_num_kv_heads // self.tp_size
        # --- Target Model Copy ---
        for layer_idx in range(target_model_config.num_hidden_layers):
            k_dest_buf = self.target_worker.model_runner.token_to_kv_pool.get_key_buffer(layer_idx)
            v_dest_buf = self.target_worker.model_runner.token_to_kv_pool.get_value_buffer(layer_idx)
            
            p1_k_src_flat = unpacked_p1["kv_cache_layers"]["target_k_cache_layers"][layer_idx]
            p1_v_src_flat = unpacked_p1["kv_cache_layers"]["target_v_cache_layers"][layer_idx]
            if p1_k_src_flat.numel() > 0:
                p1_k_src = p1_k_src_flat.view(-1, target_num_kv_heads, target_head_dim)
                p1_v_src = p1_v_src_flat.view(-1, target_num_kv_heads, target_head_dim)
                k_dest_buf.index_copy_(0, p1_scatter_indices, p1_k_src)
                v_dest_buf.index_copy_(0, p1_scatter_indices, p1_v_src)
        
        # --- Draft Model Copy (both p1 and p2 parts) ---
        for layer_idx in range(draft_model_config.num_hidden_layers):
            k_dest_buf = self.model_runner.token_to_kv_pool.get_key_buffer(layer_idx)
            v_dest_buf = self.model_runner.token_to_kv_pool.get_value_buffer(layer_idx)

            p1_k_src_flat = unpacked_p1["kv_cache_layers"]["draft_k_cache_layers"][layer_idx]
            p1_v_src_flat = unpacked_p1["kv_cache_layers"]["draft_v_cache_layers"][layer_idx]
            if p1_k_src_flat.numel() > 0:
                p1_k_src = p1_k_src_flat.view(-1, draft_num_kv_heads, draft_head_dim)
                p1_v_src = p1_v_src_flat.view(-1, draft_num_kv_heads, draft_head_dim)
                k_dest_buf.index_copy_(0, p1_scatter_indices, p1_k_src)
                v_dest_buf.index_copy_(0, p1_scatter_indices, p1_v_src)
                
            p2_k_src_flat = unpacked_p2["kv_cache_layers"]["draft_k_cache_layers"][layer_idx]
            p2_v_src_flat = unpacked_p2["kv_cache_layers"]["draft_v_cache_layers"][layer_idx]
            if p2_k_src_flat.numel() > 0:
                p2_k_src = p2_k_src_flat.view(-1, draft_num_kv_heads, draft_head_dim)
                p2_v_src = p2_v_src_flat.view(-1, draft_num_kv_heads, draft_head_dim)
                k_dest_buf.index_copy_(0, p2_scatter_indices, p2_k_src)
                v_dest_buf.index_copy_(0, p2_scatter_indices, p2_v_src)

        # === Phase 5: Finalize Requests & Prepare for Merge ===
        # Re-attach the per-process references that the sender stripped.
        new_reqs_list, new_seq_lens_list = [], []
        for i in range(num_migrated_reqs):
            req = res2.requests[i]
            req.req_pool_idx = new_req_pool_indices[i].item()
            req.tokenizer = self.tokenizer
            req.prefix_indices = []
            req.last_node = tree_cache.root_node
            tree_cache.cache_unfinished_req(req)
            
            new_reqs_list.append(req)
            new_seq_lens_list.append(final_kv_lens[i].item())
        
        new_spec_hidden_states = unpacked_p2["hidden_states"]

        torch.cuda.synchronize()
        t_copy_done = time.time()
        with open(log_file, "a") as f:
            f.write(f"Phase 3-5 (Prep & GPU Copy): {t_copy_done - t_alloc_done:.6f}s\n")
            
        # === Phase 6: Merge into ScheduleBatch ===

        if not batch:
            batch = ScheduleBatch.init_new(
                [],
                self.req_to_token_pool,
                self.token_to_kv_pool_allocator,
                tree_cache,
                self.model_config,
                False,
                self.speculative_algorithm,
                self.server_args.enable_custom_logit_processor,
            )

        batch.reqs.extend(new_reqs_list)
        
        # Safely handle empty initial batch
        batch.req_pool_indices = new_req_pool_indices if batch.req_pool_indices is None else torch.cat([batch.req_pool_indices, new_req_pool_indices])
        batch.seq_lens = torch.tensor(new_seq_lens_list, device=self.device, dtype=torch.long) if batch.seq_lens is None else torch.cat([batch.seq_lens, torch.tensor(new_seq_lens_list, device=self.device, dtype=torch.long)])
        batch.seq_lens_sum = torch.sum(batch.seq_lens).item()
        if batch.extend_lens is None:
            batch.extend_lens = res2.extend_lens
        else:
            batch.extend_lens.extend(res2.extend_lens)
        # NOTE(review): prefix_lens / extend_logprob_start_lens are only taken
        # from res2 when the batch fields are None; for a non-empty batch the
        # incoming values are dropped — confirm this is intended.
        if batch.prefix_lens is None:
            batch.prefix_lens = res2.prefix_lens
        if batch.extend_logprob_start_lens is None:
            batch.extend_logprob_start_lens = res2.extend_logprob_start_lens
        batch.extend_num_tokens = sum(batch.extend_lens)
        migrated_spec_info = EagleDraftInput(
            hidden_states=new_spec_hidden_states,
            topk_p=res2.topk_p,
            topk_index=res2.topk_index,
            verified_id=res2.verified_id,
        )
        if batch.spec_info is None:
            batch.spec_info = migrated_spec_info
        else:
            batch.spec_info.merge_batch(migrated_spec_info)

        batch.input_ids = res2.verified_id if batch.input_ids is None else torch.cat([batch.input_ids, res2.verified_id])
        batch.sampling_info = SamplingBatchInfo.from_schedule_batch(batch, target_model_config.vocab_size)

        batch.prepare_for_decode()

        logger.info(f"Successfully received and integrated {num_migrated_reqs} requests. Batch size now {len(batch.reqs)}.")

        t_end = time.time()
        with open(log_file, "a") as f:
            f.write(f"Phase 6 (Merge to Batch): {t_end - t_copy_done:.6f}s\n")
            f.write(f"Total recv_req_migrate time: {t_end - t_start:.6f}s\n")

        if return_batch:
            return new_req_pool_indices, new_physical_kv_slots_flat, batch
        else:
            return new_req_pool_indices, new_physical_kv_slots_flat, None

    def recv_req_migrate_phase3(
        self,
        concat_tensor_phase3: torch.Tensor,
        res1: BatchSpecReqMigrationInfo,
        res2: BatchSpecReqMigrationInfo,
        new_req_pool_indices,
        new_physical_kv_slots_flat,
    ):
        """Unpack the phase-3 payload (target-model delta KV cache) and scatter
        it into the physical slots allocated during `recv_req_migrate`.

        Args:
            concat_tensor_phase3: Flat tensor of target-model K/V data,
                interleaved K then V per layer (same layout `send_req_migrate`
                produces for the STAGE2 phase-3 chunk).
            res1: STAGE1 metadata (bulk per-request KV lengths).
            res2: STAGE2 metadata (delta KV lengths and the migrated requests).
            new_req_pool_indices: Request-pool slots from `recv_req_migrate`.
            new_physical_kv_slots_flat: Physical KV slots from `recv_req_migrate`.
        """
        if len(res2.requests) == 0:
            return

        def _unpack_tensor(res_info: BatchSpecReqMigrationInfo, concat_tensor: torch.Tensor) -> Dict[str, Any]:
            """Split the flat payload into per-layer [num_tokens, -1] K/V views."""
            target_numel_per_layer_token = self.target_numel_per_layer
            num_tokens_to_unpack = torch.sum(res_info.kvcache_lens).item()

            unpacked_kv_layers = {
                "target_k_cache_layers": [], "target_v_cache_layers": []
            }
            cur_offset = 0
            numel_per_layer_all_tokens_target = num_tokens_to_unpack * target_numel_per_layer_token

            # K and V are interleaved per layer, matching the sender's packing order.
            for _ in range(self.num_target_layers):
                k_data_flat = concat_tensor[cur_offset : cur_offset + numel_per_layer_all_tokens_target]
                unpacked_kv_layers["target_k_cache_layers"].append(k_data_flat.view(num_tokens_to_unpack, -1))
                cur_offset += numel_per_layer_all_tokens_target

                v_data_flat = concat_tensor[cur_offset : cur_offset + numel_per_layer_all_tokens_target]
                unpacked_kv_layers["target_v_cache_layers"].append(v_data_flat.view(num_tokens_to_unpack, -1))
                cur_offset += numel_per_layer_all_tokens_target

            return {"kv_cache_layers": unpacked_kv_layers}

        num_migrated_reqs = len(res2.requests)
        target_model_config = self.target_worker.model_runner.model_config
        unpacked_p = _unpack_tensor(res2, concat_tensor_phase3)

        # Each request's destination span covers bulk (STAGE1) + delta (STAGE2) tokens.
        final_kv_lens = res1.kvcache_lens + res2.kvcache_lens
        pre_zero_tensor = torch.tensor([0], device=res1.kvcache_lens.device, dtype=res1.kvcache_lens.dtype)
        final_token_offsets = torch.cat([pre_zero_tensor, torch.cumsum(final_kv_lens, 0)])
        # Offsets for slicing the source data from the unpacked tensor.
        p2_token_offsets = torch.cat([pre_zero_tensor, torch.cumsum(res2.kvcache_lens, 0)])

        # Hoist loop invariants: per-TP-rank KV head count and head dim.
        num_kv_heads = target_model_config.num_key_value_heads // self.tp_size
        head_dim = target_model_config.head_dim

        for i in range(num_migrated_reqs):
            req = res2.requests[i]
            kv_len = final_kv_lens[i].item()
            p1_len = res1.kvcache_lens[i].item()
            new_req_pool_idx = new_req_pool_indices[i].item()

            if kv_len <= 0:
                continue

            if kv_len > self.req_to_token_pool.req_to_token.shape[1]:
                # Bug fix: Req exposes `rid` (see send_req_migrate), not `req_id`;
                # the old attribute access would itself raise on this error path.
                logger.error(f"KV cache length {kv_len} exceeds pool capacity for req {req.rid}.")
                continue

            dest_slots_full = new_physical_kv_slots_flat[final_token_offsets[i] : final_token_offsets[i+1]]
            # The first p1_len slots hold the STAGE1 data; the delta goes after them.
            dest_slots_p2 = dest_slots_full[p1_len:]

            self.req_to_token_pool.req_to_token[new_req_pool_idx, :kv_len] = dest_slots_full

            # --- Target Model Copy ---
            for layer_idx in range(target_model_config.num_hidden_layers):
                k_dest_buf = self.target_worker.model_runner.token_to_kv_pool.get_key_buffer(layer_idx)
                v_dest_buf = self.target_worker.model_runner.token_to_kv_pool.get_value_buffer(layer_idx)

                p2_k_src_flat = unpacked_p["kv_cache_layers"]["target_k_cache_layers"][layer_idx][p2_token_offsets[i]:p2_token_offsets[i+1]]
                p2_v_src_flat = unpacked_p["kv_cache_layers"]["target_v_cache_layers"][layer_idx][p2_token_offsets[i]:p2_token_offsets[i+1]]
                k_dest_buf.index_copy_(0, dest_slots_p2, p2_k_src_flat.view(-1, num_kv_heads, head_dim))
                v_dest_buf.index_copy_(0, dest_slots_p2, p2_v_src_flat.view(-1, num_kv_heads, head_dim))
    
    def handle_migrate_phase3(self):
        """Complete an in-flight phase-3 transfer on whichever side this rank is."""
        if self.send_phase3:
            # Sender side: wait for the async send to finish, then drop the buffer.
            self.req.wait()
            self.concat_tensor_phase3 = None
            self.send_phase3 = False
            return
        if self.recv_phase3:
            # Receiver side: wait for the data, then scatter it into the KV pool.
            self.req.wait()
            self.recv_req_migrate_phase3(
                self.concat_tensor_phase3,
                self.res1,
                self.res2,
                self.new_req_pool_indices,
                self.new_physical_kv_slots_flat,
            )
            self.concat_tensor_phase3 = None
            self.recv_phase3 = False

def load_token_map(token_map_path: str) -> torch.Tensor:
    """Load an EAGLE hot-token map as a 1-D ``torch.int32`` tensor.

    If `token_map_path` does not exist locally, its dirname is treated as a
    Hugging Face repo id and downloaded (weight files excluded) before loading.

    Args:
        token_map_path: Local path to the token map, or ``<repo_id>/<file>``.

    Returns:
        ``torch.int32`` tensor of hot token ids.

    Note: the original return annotation was ``List[int]``; the function has
    always returned a tensor, so the annotation is corrected here.
    """
    if not os.path.exists(token_map_path):
        cache_dir = snapshot_download(
            os.path.dirname(token_map_path),
            ignore_patterns=["*.bin", "*.safetensors"],
        )
        token_map_path = os.path.join(cache_dir, os.path.basename(token_map_path))
    # weights_only=True avoids arbitrary code execution from untrusted checkpoints.
    hot_token_id = torch.load(token_map_path, weights_only=True)
    return torch.tensor(hot_token_id, dtype=torch.int32)

