"""A GPU worker class."""
import gc
import os
from typing import TYPE_CHECKING, Optional, Tuple

import torch
import torch.distributed

import vllm.envs as envs
from vllm.config import CacheConfig, ModelConfig, ParallelConfig, VllmConfig
from vllm.distributed import (ensure_model_parallel_initialized,
                              init_distributed_environment,
                              set_custom_all_reduce)
from vllm.logger import init_logger
from vllm.model_executor import set_random_seed
from vllm.platforms import current_platform
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType, get_dtype_size
from vllm.v1.core.scheduler import SchedulerOutput
from vllm.v1.outputs import ModelRunnerOutput
from vllm.v1.worker.gpu_model_runner import GPUModelRunner

logger = init_logger(__name__)

if TYPE_CHECKING:
    from vllm.v1.core.scheduler import SchedulerOutput


class Worker:
    """A GPU worker that manages the KV cache and runs the model on one GPU.

    Each Worker is bound to exactly one GPU. Under distributed inference,
    each worker holds one partition (e.g. a TP/PP shard) of the model.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,  # torch.distributed init method, e.g. "tcp://", "env://" or "file://"
    ):
        # TODO: use WorkerBase.__init__(self, vllm_config=vllm_config)
        self.vllm_config           = vllm_config
        self.model_config          = vllm_config.model_config
        self.cache_config          = vllm_config.cache_config
        self.lora_config           = vllm_config.lora_config
        self.load_config           = vllm_config.load_config
        self.parallel_config       = vllm_config.parallel_config
        self.scheduler_config      = vllm_config.scheduler_config
        self.device_config         = vllm_config.device_config
        self.speculative_config    = vllm_config.speculative_config
        self.prompt_adapter_config = vllm_config.prompt_adapter_config
        self.observability_config  = vllm_config.observability_config

        self.rank       = rank
        self.local_rank = local_rank
        self.distributed_init_method = distributed_init_method

        # Set up the HF transformers dynamic-module cache so that models
        # shipping custom code in their repo can be downloaded and executed.
        if self.model_config.trust_remote_code:
            # note: lazy import to avoid importing torch before initializing
            from vllm.utils import init_cached_hf_modules
            init_cached_hf_modules()

        # Torch profiler. Enabled and configured through env vars:
        # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
        # torch.profiler.profile records CPU/CUDA op timings, memory usage and
        # CUDA kernel execution times, to help locate performance bottlenecks.
        if envs.VLLM_TORCH_PROFILER_DIR:
            torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
            logger.info("Profiling enabled. Traces will be saved to: %s",
                        torch_profiler_trace_dir)
            self.profiler = torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,   # record both CPU and CUDA activity
                    torch.profiler.ProfilerActivity.CUDA,
                ],
                with_stack=True,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    torch_profiler_trace_dir, use_gzip=True))
        else:
            self.profiler = None

    def initialize(self):
        """Bind this worker to its GPU, then set up the distributed env.

        Raises:
            RuntimeError: if the configured device type is not "cuda".
        """
        if self.device_config.device.type == "cuda":   # TODO: check device-type naming on other vendors (e.g. MetaX, Iluvatar)
            # torch.distributed.all_reduce does not free the input tensor until
            # the synchronization point. This causes the memory usage to grow
            # as the number of all_reduce calls increases. This env var disables
            # this behavior.
            # Related issue:
            # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
            # By default NCCL records CUDA streams to guarantee the ordering
            # and correctness of communication ops, which adds overhead.
            # TORCH_NCCL_AVOID_RECORD_STREAMS=1 skips that recording; the
            # program itself must then guarantee correctness.
            os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

            # This env var set by Ray causes exceptions with graph building.
            os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
            self.device = torch.device(f"cuda:{self.local_rank}")  # one device per local rank

            # TODO: could non-CUDA backends (e.g. Ascend) alias torch.cuda to
            # keep the code below uniform?
            torch.cuda.set_device(self.device)

            _check_if_gpu_supports_dtype(self.model_config.dtype)
            gc.collect()
            torch.cuda.empty_cache()

            # torch.cuda.mem_get_info() -> (free bytes, total bytes).
            # Record the free GPU memory at process init (typically one
            # process per GPU); used later as a memory-profiling baseline.
            self.init_gpu_memory = torch.cuda.mem_get_info()[0]
        else:
            raise RuntimeError(
                f"Not support device type: {self.device_config.device}")

        # Bring up the global process group and the TP/PP groups.
        init_worker_distributed_environment(self.parallel_config, self.rank,
                                            self.distributed_init_method,
                                            self.local_rank)
        # Set random seed.
        set_random_seed(self.model_config.seed)  # seeds torch, random and numpy

        # GPUModelRunner is responsible for actually running the model.
        self.model_runner = GPUModelRunner(self.vllm_config, self.device)

    def load_model(self) -> None:
        """Load the model weights onto this worker's device."""
        self.model_runner.load_model()


    @torch.inference_mode()
    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Profiles the peak memory usage of the model to determine how many
        KV blocks may be allocated without OOMs.

        The engine will first conduct a profiling of the existing memory usage.
        Then, it calculate the maximum possible number of GPU and CPU blocks
        that can be allocated with the remaining free memory.

        Returns:
            A ``(num_gpu_blocks, num_cpu_blocks)`` tuple; the CPU count is
            always 0 here (v1 keeps the KV cache on the GPU).

        .. tip::
            You may limit the usage of GPU memory
            by adjusting the `gpu_memory_utilization` parameter.
        """
        # Profile the memory usage of the model and get the maximum number of
        # cache blocks that can be allocated with the remaining free memory.
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()

        _, total_gpu_memory = torch.cuda.mem_get_info()    # total GPU memory in bytes
        # Run one forward pass with dummy inputs to profile memory usage.
        self.model_runner.profile_run()
        torch.cuda.synchronize()

        free_gpu_memory, _ = torch.cuda.mem_get_info()
        # NOTE(woosuk): Here we assume that the other processes using the same
        # GPU did not change their memory usage during the profiling.
        assert self.init_gpu_memory > free_gpu_memory, (   # free memory must have shrunk after the run
            "Error in memory profiling. "
            f"Initial free memory {self.init_gpu_memory}, current free memory"
            f" {free_gpu_memory}. This happens when the GPU memory was "
            "not properly cleaned up before initializing the vLLM instance.")

        # Get the peak memory allocation recorded by torch
        peak_memory = torch.cuda.memory_stats()["allocated_bytes.all.peak"]

        # Check for any memory left around that may have been allocated on the
        # gpu outside of `torch`. NCCL operations, for example, can use a few
        # GB during a forward pass
        torch.cuda.empty_cache()
        torch_allocated_bytes = torch.cuda.memory_stats(                       # bytes currently held by torch's allocator
        )["allocated_bytes.all.current"]
        total_allocated_bytes = torch.cuda.mem_get_info(                       # total - free = bytes used by all processes
        )[1] - torch.cuda.mem_get_info()[0]     # mem_get_info() -> (free, total); two calls read both fields
        non_torch_allocations = total_allocated_bytes - torch_allocated_bytes
        if non_torch_allocations > 0:
            peak_memory += non_torch_allocations
        available_kv_cache_memory = (
            total_gpu_memory * self.cache_config.gpu_memory_utilization -
            peak_memory)                                                       # budget minus our own peak usage

        # Calculate the number of blocks that can be allocated with the
        # profiled peak memory.
        cache_block_size = _get_cache_block_size(self.cache_config,
                                                 self.model_config,
                                                 self.parallel_config)
        num_gpu_blocks = int(available_kv_cache_memory // cache_block_size)
        num_gpu_blocks = max(num_gpu_blocks, 0)
        # num_gpu_blocks already accounts for the per-block size.
        return num_gpu_blocks, 0


    def initialize_cache(self, num_gpu_blocks: int) -> None:
        """Allocate GPU and CPU KV cache with the specified number of blocks.

        Raises:
            ValueError: if no blocks fit, or if the cache cannot hold the KV
                of even a single maximum-length sequence.
        """
        if num_gpu_blocks <= 0:
            raise ValueError("No available memory for the cache blocks. "
                             "Try increasing `gpu_memory_utilization` when "
                             "initializing the engine.")

        max_seq_len   = self.cache_config.block_size * num_gpu_blocks
        max_model_len = self.model_config.max_model_len

        # If the KV of one full-length sequence cannot fit, fail fast.
        if max_model_len > max_seq_len:
            raise ValueError(
                f"The model's max seq len ({max_model_len}) "
                "is larger than the maximum number of tokens that can be "
                f"stored in KV cache ({max_seq_len}). Try increasing "
                "`gpu_memory_utilization` or decreasing `max_model_len` when "
                "initializing the engine.")

        self.model_runner.initialize_kv_cache(num_gpu_blocks)


    def compile_or_warm_up_model(self) -> None:
        """Capture CUDA graphs unless eager execution is enforced."""
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model()
        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        # (seeds torch, random and numpy)
        set_random_seed(self.model_config.seed)

    @torch.inference_mode()
    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> Optional[ModelRunnerOutput]:
        """Run one model step; only rank 0 returns the output."""
        # TODO: consider gather instead of all-reduce to save bandwidth and
        # improve communication efficiency.
        output = self.model_runner.execute_model(scheduler_output)
        return output if self.rank == 0 else None

    def profile(self, is_start: bool = True):
        """Start (or stop, if ``is_start`` is False) the torch profiler."""
        if self.profiler is None:
            raise RuntimeError("Profiler is not enabled.")
        if is_start:
            self.profiler.start()
        else:
            self.profiler.stop()

    def check_health(self) -> None:
        # worker will always be healthy as long as it's running.
        return


def init_worker_distributed_environment(
    parallel_config: ParallelConfig,
    rank: int,
    distributed_init_method: Optional[str] = None,
    local_rank: int = -1,
) -> None:
    """Set up the distributed runtime for this worker.

    Configures whether the custom all-reduce op may be used for tensor
    parallelism, brings up the global process group, and then initializes
    the model-parallel (TP/PP) groups.
    """
    # Enable the custom all-reduce op unless it is explicitly disabled.
    allow_custom_all_reduce = not parallel_config.disable_custom_all_reduce
    set_custom_all_reduce(allow_custom_all_reduce)

    init_distributed_environment(
        parallel_config.world_size,
        rank,
        distributed_init_method,
        local_rank,
    )

    ensure_model_parallel_initialized(
        parallel_config.tensor_parallel_size,
        parallel_config.pipeline_parallel_size,
    )


def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
    # Check if the GPU supports the dtype.
    if torch_dtype == torch.bfloat16:  # noqa: SIM102
        if not current_platform.has_device_capability(80):
            capability = current_platform.get_device_capability()
            gpu_name = current_platform.get_device_name()

            if capability is None:
                compute_str = "does not have a compute capability"
            else:
                version_str = capability.as_version_str()
                compute_str = f"has compute capability {version_str}"

            raise ValueError(
                "Bfloat16 is only supported on GPUs with compute capability "
                f"of at least 8.0. Your {gpu_name} GPU {compute_str}. "
                "You can use float16 instead by explicitly setting the"
                "`dtype` flag in CLI, for example: --dtype=half.")



# NOTE: v1 workers manage the KV cache themselves, hence this local helper
# (it intentionally mirrors vllm/worker/cache_engine.py).
def _get_cache_block_size(
    cache_config: CacheConfig,
    model_config: ModelConfig,
    parallel_config: ParallelConfig,
) -> int:
    """Return the size in bytes of a single KV-cache block.

    A block stores the KV of ``block_size`` tokens. Per token, each attention
    layer keeps one key vector and one value vector of
    ``num_kv_heads * head_size`` elements each, so a block holds::

        block_size * num_kv_heads * head_size * 2 * num_attention_layers

    elements, multiplied by the byte width of the cache dtype.
    """
    # Resolve the dtype used for the KV cache entries.
    if cache_config.cache_dtype == "auto":
        kv_dtype = model_config.dtype
    else:
        kv_dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]

    # K and V vectors (factor of 2) for every token slot in the block.
    elems_per_layer = (2 * cache_config.block_size *
                       model_config.get_num_kv_heads(parallel_config) *
                       model_config.get_head_size())
    num_attention_layers = model_config.get_num_layers_by_block_type(
        parallel_config, LayerBlockType.attention)
    return get_dtype_size(kv_dtype) * num_attention_layers * elems_per_layer
