# SPDX-License-Identifier: Apache-2.0
import os 
import torch
import multiprocessing as mp

from   typing import Tuple

from vllm                    import LLM, SamplingParams
from vllm.config             import ModelConfig
from vllm.config             import ParallelConfig
from vllm.distributed.utils  import get_pp_indices


# Force the legacy (V0) vLLM engine.
# NOTE(review): vllm is already imported above; if this vllm version reads
# VLLM_USE_V1 at import time, this assignment may come too late — confirm.
os.environ["VLLM_USE_V1"]  = "0"


# Test prompt (Chinese: "Nice to meet you, ") and local model checkpoint path.
prompt = "很高兴见到你, " 
model  = "/data/yangxianpku/models/Qwen/Qwen2.5-7B-Instruct"


def get_layer_info(
        model_config: ModelConfig,
        parallel_config: "ParallelConfig"
    ) -> Tuple[int, int, int]:
    """Compute the hidden-layer span owned by this process's pipeline stage.

    The pipeline-parallel rank is derived from the global rank and the
    tensor-parallel size, then used to look up the [start, end) layer
    indices assigned to that stage.

    Args:
        model_config:    model configuration (supplies ``hf_text_config``).
        parallel_config: parallel configuration (rank / TP size / PP size).

    Returns:
        Tuple of (total hidden layers, start layer index, end layer index).
    """
    hf_cfg = model_config.hf_text_config
    # MTP models keep their depth under a different HF config attribute.
    layer_attr = ("num_nextn_predict_layers"
                  if hf_cfg.model_type == "deepseek_mtp"
                  else "num_hidden_layers")
    num_layers = getattr(hf_cfg, layer_attr, 0)

    # Ranks are laid out TP-major, so integer division yields the PP rank.
    pp_rank = parallel_config.rank // parallel_config.tensor_parallel_size
    start, end = get_pp_indices(num_layers, pp_rank,
                                parallel_config.pipeline_parallel_size)

    return num_layers, start, end

def main():
    """Build the engine and print this driver's pipeline layer partition."""
    # Spin up the vLLM engine: 2-way TP x 2-way PP, eager mode, bf16.
    engine_kwargs = dict(
        model=model,
        max_model_len=8192,
        tensor_parallel_size=2,
        pipeline_parallel_size=2,
        trust_remote_code=True,
        # enable_chunked_prefill=False,
        dtype=torch.bfloat16,
        enforce_eager=True,
        gpu_memory_utilization=0.8,
    )
    llm = LLM(**engine_kwargs)

    engine = llm.llm_engine
    total_num_hidden_layers, start, end = get_layer_info(
        engine.model_config, engine.parallel_config)

    print(total_num_hidden_layers, start, end)

    # Inference is intentionally disabled; re-enable to generate text:
    # sampling_params = SamplingParams(
    #                             temperature=0, 
    #                             top_p=0.95, 
    #                             max_tokens=100
    #                           )
    # outputs = llm.generate(prompt, sampling_params)
    # for output in outputs:
    #     print(f"Generated text: {output.outputs[0].text}")

if __name__ == "__main__":
    # Set the multiprocessing start method to 'spawn' before launching the
    # engine (presumably so worker processes start clean — confirm against
    # the vllm multiprocessing backend requirements).
    mp.set_start_method('spawn', force=True)
    main()