# NOTE: Please run this file to make sure the test cases are correct.

from typing import List

import torch

import pickle
import numpy as np

from sglang.srt.utils import is_cuda, is_hip


if is_cuda() or is_hip():
    from sgl_kernel import (
        build_tree_kernel_efficient as sgl_build_tree_kernel_efficient,
    )

def first_rank_print(*args, **kwargs):
    """Print only on the process whose current CUDA device is 0.

    Forwards all arguments to ``print``; output from every other device is
    silently dropped so multi-GPU runs log once.

    NOTE(review): assumes CUDA is initialized — ``torch.cuda.current_device()``
    raises on CPU-only hosts; confirm callers only run on GPU ranks.
    """
    if torch.cuda.current_device() == 0:
        print(*args, **kwargs)

import time
def calculate_benefit(score_list, num_selected_token, seq_lens_sum, predictor):
    """Estimate the benefit of verifying ``num_selected_token`` tokens per request.

    Benefit is defined as (expected number of accepted tokens) divided by the
    predicted cost of running verification on that many tokens.

    Args:
        score_list: (batch, num_candidates) flattened draft-token scores.
        num_selected_token: Total verify-token budget per request, including
            the already-verified root token.
        seq_lens_sum: Sum of sequence lengths across the batch.
        predictor: Cost model consumed by ``verify_cost_prediction``.

    Returns:
        Tuple of (benefit, top_scores) where ``top_scores`` is the
        ``torch.topk`` result over the draft scores.
    """
    batch_size = score_list.shape[0]
    # One draft slot per request is the verified root token, hence k - 1 here
    # and the "+ batch_size" below (the root is always accepted).
    top_scores = torch.topk(score_list, num_selected_token - 1, dim=-1)
    accepted_token_num = top_scores.values.sum() + batch_size
    verified_token_num = num_selected_token * batch_size
    verify_cost = verify_cost_prediction(verified_token_num, seq_lens_sum, predictor)
    benefit = float(accepted_token_num / verify_cost)
    return benefit, top_scores

def calculate_benefit_batch(score_list, num_selected_tokens, seq_lens_sum, predictor):
    """Evaluate the verify benefit for a list of candidate token budgets.

    A single ``torch.topk`` with the largest required k is computed once and
    smaller budgets reuse its prefix, avoiding one top-k call per candidate.

    Args:
        score_list: (batch, num_candidates) flattened draft-token scores.
        num_selected_tokens: Candidate verify-token budgets, assumed to be in
            increasing order (early stopping relies on this).
        seq_lens_sum: Sum of sequence lengths across the batch.
        predictor: Cost model consumed by ``verify_cost_prediction``.

    Returns:
        Tuple ``(benefits, top_scores_list, accepted_token_num_list)`` where
        entry i corresponds to ``num_selected_tokens[i]``.  Each element of
        ``top_scores_list`` is a ``(values, indices)`` tuple mirroring
        ``torch.topk``'s result.  The lists may be shorter than the input when
        early stopping triggers.
    """
    batch_size = score_list.shape[0]
    benefits = []
    top_scores_list = []
    accepted_token_num_list = []

    # Robustness: nothing to evaluate.
    if not num_selected_tokens:
        return benefits, top_scores_list, accepted_token_num_list

    # One top-k with the largest k; smaller budgets slice its prefix.
    max_k = max(num_selected_tokens) - 1
    all_top_scores = torch.topk(score_list, max_k, dim=-1)

    # Stop after this many consecutive benefit decreases.
    selection_threshold = 5
    counter = 0
    last_benefit = None

    for num_selected_token in num_selected_tokens:
        k = num_selected_token - 1
        # Slice the shared top-k result instead of re-running topk.
        top_scores_values = all_top_scores.values[:, :k]
        top_scores_indices = all_top_scores.indices[:, :k]

        # "+ batch_size": the verified root token of each request is always
        # accepted, on top of the expected draft acceptances.
        accepted_token_num = top_scores_values.sum() + batch_size
        verified_token_num = num_selected_token * batch_size
        verify_cost = verify_cost_prediction(verified_token_num, seq_lens_sum, predictor)
        accepted_token_num_list.append(accepted_token_num)
        benefit = float(accepted_token_num / verify_cost)

        benefits.append(benefit)
        # Store a plain (values, indices) tuple instead of a topk return object.
        top_scores_list.append((top_scores_values, top_scores_indices))

        # Early stopping once the benefit has decreased too many times in a row.
        if last_benefit is not None and benefit < last_benefit:
            counter += 1
        else:
            counter = 0
        if counter > selection_threshold:
            break
        last_benefit = benefit

    return benefits, top_scores_list, accepted_token_num_list

# Bucket granularity for the prediction cache: sequence-length sums are
# bucketed in steps of BUCKET_RANGE_1 tokens, verified-token counts in
# steps of BUCKET_RANGE_2.
BUCKET_RANGE_1 = 800
BUCKET_RANGE_2 = 10
# Cache-grid dimensions.  100 buckets * 800 covers seq_lens_sum up to 80000;
# 200 buckets * 10 covers verified_token_num up to 2000.
MAX_SEQ_LENS_BUCKET = 100
MAX_VERIFIED_TOKEN_BUCKET = 200

class SavedModelPredictor:
    """Predictor class for using saved regression models.

    Wraps a pickled regression model (scaler + estimator, scikit-learn style)
    that predicts verification cost from four metrics, and maintains a
    bucketed in-memory cache of predictions indexed by
    (metric1 // BUCKET_RANGE_1, metric2 // BUCKET_RANGE_2).
    """
    
    def __init__(self, model_file=None):
        """
        Initialize predictor with optional model file
        
        Args:
            model_file (str): Path to saved model file (.pkl)
        """
        self.model_file = model_file
        self.best_model = None
        self.best_model_name = None
        self.best_score = None
        self.scaler = None
        self.feature_names = None
        self.models = {}
        self.timestamp = None
        # 2-D object grid of cached predictions; None marks an empty slot.
        self.cached_data = np.full((MAX_SEQ_LENS_BUCKET, MAX_VERIFIED_TOKEN_BUCKET), None, dtype=object)
        
        if model_file:
            self.load_model(model_file)
    
    def load_model(self, model_file):
        """
        Load a saved model from file
        
        Args:
            model_file (str): Path to the saved model file
            
        Returns:
            dict: Model information
        """
        
        # SECURITY NOTE: pickle.load executes arbitrary code during
        # deserialization — only load model files from trusted sources.
        with open(model_file, 'rb') as f:
            model_data = pickle.load(f)
        
        # Restore model data; 'best_model', 'scaler', etc. are required keys,
        # while 'all_models'/'timestamp' are optional.
        self.best_model = model_data['best_model']
        self.best_model_name = model_data['best_model_name']
        self.best_score = model_data['best_score']
        self.scaler = model_data['scaler']
        self.feature_names = model_data['feature_names']
        self.models = model_data.get('all_models', {})
        self.timestamp = model_data.get('timestamp', 'Unknown')
        return model_data
    
    def predict(self, input_data):
        """
        Make prediction using the loaded model
        
        Args:
            input_data: Dictionary or list containing four metrics
                       Format: {'metric1': val1, 'metric2': val2, 'metric3': val3, 'metric4': val4}
                       or [val1, val2, val3, val4]
        
        Returns:
            float: Predicted value

        Raises:
            ValueError: If no model is loaded or the input is malformed.
        """
        if self.best_model is None:
            raise ValueError("No model loaded. Please load a model first using load_model()")
        
        # Ensure correct input data format
        if isinstance(input_data, dict):
            # Validate all required features are present
            missing_features = set(self.feature_names) - set(input_data.keys())
            if missing_features:
                raise ValueError(f"Missing features: {missing_features}")
            
            # NOTE(review): validation uses self.feature_names, but the feature
            # vector is built from the hard-coded keys 'metric1'..'metric4' —
            # assumes feature_names is exactly these four names in this order;
            # confirm against the model-training code.
            features = np.array([[
                input_data['metric1'],
                input_data['metric2'],
                input_data['metric3'],
                input_data['metric4']
            ]])
        elif isinstance(input_data, list):
            if len(input_data) != 4:
                raise ValueError(f"Expected 4 features, got {len(input_data)}")
            features = np.array([input_data])
        else:
            raise ValueError("Input data must be dictionary or list format")
        
        # Standardize input data
        features_scaled = self.scaler.transform(features)
        
        # Predict
        prediction = self.best_model.predict(features_scaled)[0]
        
        return prediction
    
    def predict_batch(self, input_data_array):
        """
        Make batch predictions using the loaded model
        
        Args:
            input_data_array: numpy array with shape (batch_size, 4) containing four metrics
                             Format: [[metric1, metric2, metric3, metric4], ...]

        Side effects:
            Writes every prediction into self.cached_data at the bucket derived
            from (metric1 // BUCKET_RANGE_1, metric2 // BUCKET_RANGE_2).
            Returns nothing; results are only accessible via the cache.
        """
        input_scaled = self.scaler.transform(input_data_array)
        predictions = self.best_model.predict(input_scaled)
        
        # Bucket indices for every row, computed vectorized.
        metric1_values = input_data_array[:, 0].astype(int) // BUCKET_RANGE_1
        metric2_values = input_data_array[:, 1].astype(int) // BUCKET_RANGE_2
        
        for i, pred in enumerate(predictions):
            metric1 = metric1_values[i]
            metric2 = metric2_values[i]
            if 0 <= metric1 < MAX_SEQ_LENS_BUCKET and 0 <= metric2 < MAX_VERIFIED_TOKEN_BUCKET:
                self.cached_data[metric1, metric2] = float(pred)
            else:
                # Input exceeds the cache grid; stripped under -O (assert).
                assert False, "Index out of range"
        return
    
    def get_model_info(self):
        """
        Get information about the loaded model
        
        Returns:
            dict: Model information
        """
        if self.best_model is None:
            return {"status": "No model loaded"}
        
        info = {
            'model_file': self.model_file,
            'best_model_name': self.best_model_name,
            'best_score': self.best_score,
            'feature_names': self.feature_names,
            'model_type': type(self.best_model).__name__,
            'timestamp': self.timestamp,
            'num_models_available': len(self.models)
        }
        
        return info
    
    def find_cache(self, input_data, cached_data):
        """
        Find cache for inputs using numpy array indexing

        Args:
            input_data: Dict with at least 'metric1' and 'metric2'.
            cached_data: Cache grid to search.  Callers pass
                ``self.cached_data``; the parameter (rather than the
                attribute) is read here.

        Returns:
            Cached prediction, or None on a cache miss.
        """
        i0 = input_data['metric1']//BUCKET_RANGE_1
        i1 = input_data['metric2']//BUCKET_RANGE_2
        if 0 <= i0 < MAX_SEQ_LENS_BUCKET and 0 <= i1 < MAX_VERIFIED_TOKEN_BUCKET:
            cached_value = cached_data[i0, i1]
            if cached_value is not None:
                return cached_value
        else:
            assert False, "Index out of range"
        return None
    
    def prepare_input_data(self, metric1, metric2):
        """
        Prepare input data with fixed metric3 and metric4 values
        
        Args:
            metric1: First metric value
            metric2: Second metric value
            
        Returns:
            dict: Input data with all four metrics
        """
        # NOTE(review): 846 / 362.1 are presumably calibration constants baked
        # in when the regression model was trained — confirm their origin.
        return {
            'metric1': metric1,
            'metric2': metric2,
            'metric3': 846,  # Fixed value
            'metric4': 362.1  # Fixed value
        }
    def prepare_input_data_batch(self, seq_lens_sum, verified_token_num):
        """Build input dicts for every (seq_lens_sum, verified_token_num) pair.

        Returns the Cartesian product of the two iterables as a list of dicts
        produced by prepare_input_data.
        """
        input_data = []
        for i in seq_lens_sum:
            for j in verified_token_num:
                input_data.append(self.prepare_input_data(i, j))
        return input_data

def build_predictor(model_file="/data/wangsiqi/regression_model_l40_2048.pkl"):
    """Create a SavedModelPredictor from a saved regression model.

    Args:
        model_file: Path to the pickled model file.  Defaults to the
            historical hard-coded location for backward compatibility;
            parameterized so other deployments can supply their own path.

    Returns:
        SavedModelPredictor with the model loaded.
    """
    return SavedModelPredictor(model_file)

def verify_cost_prediction(verified_token_num, seq_lens_sum, predictor):
    """Predict the cost of verifying ``verified_token_num`` tokens, with caching.

    Checks the predictor's bucketed cache first; on a miss, runs the model and
    stores the result in the same bucket so subsequent calls are free.

    Args:
        verified_token_num: Total number of tokens to verify across the batch.
        seq_lens_sum: Sum of sequence lengths across the batch.
        predictor: Loaded SavedModelPredictor.

    Returns:
        Predicted verification cost (float).
    """
    input_data = predictor.prepare_input_data(seq_lens_sum, verified_token_num)
    cached_prediction = predictor.find_cache(input_data, predictor.cached_data)
    if cached_prediction is not None:
        return cached_prediction

    first_rank_print("not hit cache, seq_length:{}, draft_num:{}".format(input_data['metric1'], input_data['metric2']),flush=True)
    prediction = predictor.predict(input_data)
    # Populate the cache bucket so the next query in this bucket hits.
    i0 = input_data['metric1']//BUCKET_RANGE_1
    i1 = input_data['metric2']//BUCKET_RANGE_2
    assert 0 <= i0 < MAX_SEQ_LENS_BUCKET and 0 <= i1 < MAX_VERIFIED_TOKEN_BUCKET, "Index out of range"
    predictor.cached_data[i0, i1] = prediction
    return prediction

def build_tree_kernel_efficient_preprocess(
    verified_id: torch.Tensor,
    score_list: List[torch.Tensor],
    token_list: List[torch.Tensor],
    parents_list: List[torch.Tensor],
    num_verify_tokens: int,
    seq_lens_sum: int,
    predictor: SavedModelPredictor,
):
    """Select the draft-token budget with the best predicted benefit, then
    gather the corresponding draft tokens (dynamic speculative path).

    Args:
        verified_id: (batch,) already-verified token per request.
        score_list: Per-step draft scores; concatenated to (b, n*topk).
        token_list: Per-step draft tokens; concatenated alongside the scores.
        parents_list: Per-step parent indices for tree construction.
        num_verify_tokens: Upper bound on the verify-token budget.
        seq_lens_sum: Sum of sequence lengths across the batch.
        predictor: Cost model used to score each candidate budget.

    Returns:
        (parent_list, top_scores_index, draft_tokens, best_num_verify_tokens)
    """
    score_list = torch.cat(score_list, dim=1).flatten(
        1
    )  # b, n, topk; n= 1 + (num_steps-1) * self.topk
    ss_token_list = torch.cat(
        token_list, dim=1
    )  # b, (self.topk + (num_steps-1) * self.topk)

    # Evaluate every candidate budget in [2, num_verify_tokens] and keep the
    # one whose (accepted tokens / verify cost) ratio is maximal.
    num_selected_tokens = list(range(2, num_verify_tokens + 1))
    benefits, top_scores_list, _accepted_token_nums = calculate_benefit_batch(
        score_list, num_selected_tokens, seq_lens_sum, predictor
    )
    max_benefit_idx = int(np.argmax(benefits))
    best_num_verify_tokens = num_selected_tokens[max_benefit_idx]
    # Each entry is a (values, indices) tuple; only the indices are needed.
    _best_values, top_scores_index = top_scores_list[max_benefit_idx]

    # Sort indices so gathered tokens appear in tree order, then prepend the
    # verified root token of each request.
    top_scores_index = torch.sort(top_scores_index).values
    draft_tokens = torch.gather(ss_token_list, index=top_scores_index, dim=1)
    draft_tokens = torch.cat((verified_id.unsqueeze(1), draft_tokens), dim=1).flatten()

    if len(parents_list) > 1:
        parent_list = torch.cat(parents_list[:-1], dim=1)
    else:
        # Single-step case: no internal parents.
        batch_size = parents_list[0].shape[0]
        parent_list = torch.empty(batch_size, 0, device=parents_list[0].device)

    return parent_list, top_scores_index, draft_tokens, best_num_verify_tokens

def orig_build_tree_kernel_efficient_preprocess(
    verified_id: torch.Tensor,
    score_list: List[torch.Tensor],
    token_list: List[torch.Tensor],
    parents_list: List[torch.Tensor],
    num_verify_tokens: int,
):
    """Pick the fixed top-(num_verify_tokens - 1) draft tokens per request.

    Static-budget counterpart of the dynamic preprocess: concatenates the
    per-step scores/tokens, keeps the highest-scoring candidates, prepends the
    verified root token, and assembles the parent list for tree building.

    Returns:
        (parent_list, top_scores_index, draft_tokens)
    """
    # Flatten per-step scores/tokens to (b, n*topk) candidate grids.
    flat_scores = torch.cat(score_list, dim=1).flatten(1)
    candidate_tokens = torch.cat(token_list, dim=1)

    # Keep the best (num_verify_tokens - 1) candidates; the remaining slot is
    # the verified root token.  Indices are sorted to preserve tree order.
    picked = torch.topk(flat_scores, num_verify_tokens - 1, dim=-1)
    top_scores_index, _ = torch.sort(picked.indices)

    chosen = candidate_tokens.gather(dim=1, index=top_scores_index)
    draft_tokens = torch.cat((verified_id.unsqueeze(1), chosen), dim=1).flatten()

    if len(parents_list) <= 1:
        # Single-step case: there are no internal parents.
        bs = parents_list[0].shape[0]
        parent_list = torch.empty(bs, 0, device=parents_list[0].device)
    else:
        parent_list = torch.cat(parents_list[:-1], dim=1)

    return parent_list, top_scores_index, draft_tokens


def build_tree_kernel_efficient(
    verified_id: torch.Tensor,
    score_list: List[torch.Tensor],
    token_list: List[torch.Tensor],
    parents_list: List[torch.Tensor],
    seq_lens: torch.Tensor,
    seq_lens_sum: int,
    topk: int,
    spec_steps: int,
    num_verify_tokens: int,
    predictor: SavedModelPredictor = None,
    enable_dynamic_spec: bool = False,
):
    """Build the tree mask and retrieval structures for speculative verify.

    When ``enable_dynamic_spec`` is True, the verify-token budget is chosen
    dynamically via the cost predictor; otherwise the fixed
    ``num_verify_tokens`` budget is used.

    Args:
        verified_id: (batch,) already-verified token per request.
        score_list / token_list / parents_list: Per-step draft outputs.
        seq_lens: (batch,) sequence lengths.
        seq_lens_sum: Sum of ``seq_lens``.
        topk: Draft branching factor.
        spec_steps: Number of speculative draft steps.
        num_verify_tokens: (Maximum) number of tokens to verify per request.
        predictor: Cost model; only required when ``enable_dynamic_spec``.
            Defaults to None so legacy callers using the original signature
            keep working.
        enable_dynamic_spec: Select the budget dynamically (defaults to the
            original static behavior).

    Returns:
        (tree_mask, positions, retrive_index, retrive_next_token,
         retrive_next_sibling, draft_tokens, num_verify_tokens) — the last
        element is the budget actually used.
    """
    if enable_dynamic_spec:
        parent_list, top_scores_index, draft_tokens, best_num_verify_tokens = (
            build_tree_kernel_efficient_preprocess(
                verified_id,
                score_list,
                token_list,
                parents_list,
                num_verify_tokens,
                seq_lens_sum,
                predictor,
            )
        )
        # The dynamically chosen budget replaces the static one below.
        num_verify_tokens = best_num_verify_tokens
    else:
        parent_list, top_scores_index, draft_tokens = (
            orig_build_tree_kernel_efficient_preprocess(
                verified_id,
                score_list,
                token_list,
                parents_list,
                num_verify_tokens,
            )
        )
    bs = seq_lens.numel()
    device = seq_lens.device
    # e.g. for bs=1, tree_mask: num_draft_token, seq_lens_sum + num_draft_token (flattened)
    # where each row indicates the attending pattern of each draft token
    # TODO: make them torch.empty and fuse them into `sgl_build_tree_kernel`
    tree_mask = torch.full(
        (
            seq_lens_sum * num_verify_tokens
            + num_verify_tokens * num_verify_tokens * bs,
        ),
        True,
        device=device,
    )
    retrive_index = torch.full(
        (bs, num_verify_tokens), -1, device=device, dtype=torch.long
    )
    retrive_next_token = torch.full(
        (bs, num_verify_tokens), -1, device=device, dtype=torch.long
    )
    retrive_next_sibling = torch.full(
        (bs, num_verify_tokens), -1, device=device, dtype=torch.long
    )
    # position: where each token belongs to
    # e.g. if depth of each draft token is [0, 1, 1, 2] and the prompt length is 7
    # then, positions = [7, 8, 8, 9]
    positions = torch.empty((bs * num_verify_tokens,), device=device, dtype=torch.long)

    # CUDA kernel fills the structures above in place.
    sgl_build_tree_kernel_efficient(
        parent_list,
        top_scores_index,
        seq_lens.to(torch.int32),
        tree_mask,
        positions,
        retrive_index,
        retrive_next_token,
        retrive_next_sibling,
        topk,
        spec_steps,
        num_verify_tokens,
    )
    return (
        tree_mask,
        positions,
        retrive_index,
        retrive_next_token,
        retrive_next_sibling,
        draft_tokens,
        num_verify_tokens,
    )


def test_build_tree_kernel_efficient():
    """Smoke-test build_tree_kernel_efficient on its static (non-dynamic) path.

    Requires a CUDA device.  Expected values come from the original sglang
    implementation for this fixed two-request batch.
    """
    verified_id = torch.tensor([29974, 13], device="cuda", dtype=torch.int32)
    # Per-step draft scores: (batch, expanded_nodes, topk).
    score_list = [
        torch.tensor(
            [
                [[7.1127e-01, 2.8292e-01, 2.2995e-03, 1.7357e-03]],
                [[9.7476e-01, 2.2219e-02, 6.5031e-04, 1.3212e-04]],
            ],
            dtype=torch.float32,
            device="cuda",
        ),
        torch.tensor(
            [
                [
                    [6.9142e-01, 1.2863e-02, 1.6873e-03, 1.1871e-03],
                    [2.4787e-01, 1.8818e-02, 1.4204e-02, 9.2235e-04],
                    [2.2971e-03, 1.6700e-06, 1.8737e-07, 8.3146e-08],
                    [1.2771e-03, 2.4374e-04, 1.7832e-04, 1.1947e-05],
                ],
                [
                    [8.4832e-02, 6.6068e-02, 5.8304e-02, 5.7851e-02],
                    [2.3616e-03, 1.1243e-03, 5.4368e-04, 2.7768e-04],
                    [2.5286e-04, 1.5578e-04, 2.8817e-05, 1.2888e-05],
                    [1.2834e-04, 2.5417e-06, 1.1279e-06, 1.6088e-08],
                ],
            ],
            dtype=torch.float32,
            device="cuda",
        ),
        torch.tensor(
            [
                [
                    [6.6438e-01, 2.6997e-02, 2.4236e-05, 4.0821e-06],
                    [2.4402e-01, 2.8409e-03, 5.0935e-04, 2.9022e-04],
                    [1.6178e-02, 2.0567e-03, 4.5892e-04, 3.0034e-05],
                    [1.3023e-02, 5.0497e-04, 3.6371e-04, 8.7750e-05],
                ],
                [
                    [2.3263e-02, 2.0054e-02, 9.3990e-03, 2.7783e-03],
                    [6.4156e-02, 5.5506e-04, 1.0429e-04, 9.7211e-05],
                    [4.9950e-02, 5.0630e-03, 9.0068e-04, 3.3656e-04],
                    [7.5817e-03, 8.5731e-04, 6.9972e-04, 6.0793e-04],
                ],
            ],
            dtype=torch.float32,
            device="cuda",
        ),
        torch.tensor(
            [
                [
                    [6.6420e-01, 1.0525e-04, 6.5864e-05, 1.2253e-06],
                    [1.3019e-01, 1.0461e-01, 5.2083e-03, 1.6777e-03],
                    [2.0103e-02, 6.7335e-03, 1.2625e-04, 1.0364e-05],
                    [1.5142e-02, 7.0819e-04, 9.6595e-05, 8.7951e-05],
                ],
                [
                    [5.8608e-02, 1.8840e-03, 7.8535e-04, 4.4400e-04],
                    [1.2185e-02, 2.0684e-03, 1.7418e-03, 1.4327e-03],
                    [6.2455e-03, 6.1487e-03, 2.6862e-03, 1.8034e-03],
                    [1.8590e-03, 1.6151e-03, 1.2481e-03, 3.6038e-04],
                ],
            ],
            dtype=torch.float32,
            device="cuda",
        ),
    ]
    # Per-step candidate token ids matching score_list.
    token_list = [
        torch.tensor(
            [[29896, 29906, 29900, 29945], [13, 2, 29871, 28956]],
            dtype=torch.int64,
            device="cuda",
        ),
        torch.tensor(
            [
                [29889, 29974, 29945, 29900, 29974, 29922, 29930, 29958,
                 29889, 29974, 29930, 29945, 29974, 29922, 29930, 29958],
                [22550, 4136, 16492, 8439, 29871, 2, 3001, 13,
                 2, 13, 29906, 29946, 2, 13, 29871, 259],
            ],
            device="cuda",
        ),
        torch.tensor(
            [
                [29946, 29945, 29953, 29906, 29896, 29945, 29900, 29906,
                 29896, 29945, 29906, 29953, 29896, 29945, 29906, 29946],
                [29871, 2, 29901, 29889, 29871, 2, 395, 259,
                 29901, 29871, 2, 29889, 3001, 1234, 7146, 2186],
            ],
            device="cuda",
        ),
        torch.tensor(
            [
                [29946, 29974, 29945, 29930, 29889, 29922, 29974, 29930,
                 29974, 29946, 29930, 29922, 29889, 29974, 29945, 29922],
                [29941, 29906, 2, 29946, 29871, 450, 319, 14990,
                 29946, 29941, 2, 29906, 29871, 2, 3001, 13],
            ],
            device="cuda",
        ),
    ]
    parents_list = [
        torch.tensor(
            [[-1, 0, 1, 2, 3], [-1, 0, 1, 2, 3]], dtype=torch.int64, device="cuda"
        ),
        torch.tensor([[4, 8, 9, 10], [4, 5, 6, 7]], dtype=torch.int64, device="cuda"),
        torch.tensor(
            [[20, 24, 21, 28], [24, 28, 20, 21]], dtype=torch.int64, device="cuda"
        ),
        torch.tensor(
            [[36, 40, 41, 44], [36, 40, 44, 45]], dtype=torch.int64, device="cuda"
        ),
    ]
    seq_lens = torch.tensor([5, 10], dtype=torch.int64, device="cuda")
    topk = 4
    depth = 4
    num_draft_token = 8

    # Bug fix: the function returns SEVEN values (the budget actually used was
    # appended) and takes predictor/enable_dynamic_spec; exercise the static
    # path explicitly.
    (
        tree_mask,
        position,
        retrive_index,
        retrive_next_token,
        retrive_next_sibling,
        draft_tokens,
        used_num_verify_tokens,
    ) = build_tree_kernel_efficient(
        verified_id=verified_id,
        score_list=score_list,
        token_list=token_list,
        parents_list=parents_list,
        seq_lens=seq_lens,
        seq_lens_sum=torch.sum(seq_lens).item(),
        topk=topk,
        spec_steps=depth,
        num_verify_tokens=num_draft_token,
        predictor=None,
        enable_dynamic_spec=False,
    )
    # Static path must keep the requested budget untouched.
    assert used_num_verify_tokens == num_draft_token

    first_rank_print("=========== build tree kernel efficient ==========")
    # first_rank_print(f"{tree_mask=}", flush=True)
    first_rank_print(f"{position=}", flush=True)
    first_rank_print(f"{retrive_index=}", flush=True)
    first_rank_print(f"{retrive_next_token=}", flush=True)
    first_rank_print(f"{retrive_next_sibling=}", flush=True)
    first_rank_print(f"{draft_tokens=}", flush=True)
    assert position.tolist() == [5, 6, 6, 7, 7, 8, 8, 9, 10, 11, 12, 12, 12, 12, 13, 14]
    assert retrive_index.tolist() == [
        [0, 1, 2, 3, 4, 5, 6, 7],
        [8, 9, 10, 11, 12, 13, 14, 15],
    ]
    assert retrive_next_token.tolist() == [
        [1, 3, 4, 5, 6, 7, -1, -1],
        [1, 2, -1, 6, -1, -1, 7, -1],
    ]
    assert retrive_next_sibling.tolist() == [
        [-1, 2, -1, -1, -1, -1, -1, -1],
        [-1, -1, 3, 4, 5, -1, -1, -1],
    ]
    assert draft_tokens.tolist() == [
        29974, 29896, 29906, 29889, 29974, 29946, 29896, 29946,
        13, 13, 22550, 4136, 16492, 8439, 29871, 29941,
    ]


if __name__ == "__main__":
    # Smoke-test the tree-construction path (requires a CUDA device).
    test_build_tree_kernel_efficient()
