# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/utils.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import dataclasses
import pickle
import time
from   collections import deque
from   typing      import Any, Deque, Dict, Optional, Sequence, Tuple

import torch
from   torch.distributed import TCPStore            #! TCP-based store for sharing metadata across processes

import vllm.envs   as envs
from   vllm.logger import init_logger

logger = init_logger(__name__)

#! Assert that the numerator divides evenly by the denominator.
def ensure_divisibility(numerator, denominator):
    """Ensure that numerator is divisible by the denominator."""
    message = "{} is not divisible by {}".format(numerator, denominator)
    assert numerator % denominator == 0, message

#! Exact integer division: asserts divisibility, then returns the quotient.
def divide(numerator, denominator):
    """Ensure that numerator is divisible by the denominator and return
    the division value."""
    # Same check (and message) as ensure_divisibility, inlined here.
    assert numerator % denominator == 0, "{} is not divisible by {}".format(
        numerator, denominator)
    return numerator // denominator


# Split a tensor into equal chunks along its final dimension.
def split_tensor_along_last_dim(
    tensor: torch.Tensor,
    num_partitions: int,
    contiguous_split_chunks: bool = False,
) -> Sequence[torch.Tensor]:
    """ Split a tensor along its last dimension.

        Arguments:
            tensor: input tensor.
            num_partitions: number of partitions to split the tensor
            contiguous_split_chunks: If True, make each chunk contiguous
                                     in memory.

        Returns:
            A list of Tensors
    """
    last_dim = tensor.dim() - 1
    last_dim_size = tensor.size()[last_dim]
    # The last dimension must divide evenly; same assertion (and message)
    # as divide()/ensure_divisibility, inlined here.
    assert last_dim_size % num_partitions == 0, \
        "{} is not divisible by {}".format(last_dim_size, num_partitions)
    chunk_size = last_dim_size // num_partitions
    chunks = torch.split(tensor, chunk_size, dim=last_dim)
    # NOTE: torch.split returns views that share storage with the input,
    # so they are generally not contiguous in memory.
    if contiguous_split_chunks:
        return tuple(chunk.contiguous() for chunk in chunks)
    return chunks

#! Map a pipeline-parallel rank to its [start, end) range of model layers.
def get_pp_indices(num_hidden_layers: int, pp_rank: int,
                   pp_size: int) -> Tuple[int, int]:
    """Try to evenly distribute layers across partitions.
    If the number of layers is not divisible by the number of partitions,
    the last partition will have the remaining layers.
    """
    partition_list_str = envs.VLLM_PP_LAYER_PARTITION
    if partition_list_str is None:
        # Default policy: equal shares, with the remainder assigned to the
        # last pipeline rank.
        per_rank = num_hidden_layers // pp_size
        start_layer = pp_rank * per_rank
        end_layer = (num_hidden_layers
                     if pp_rank == pp_size - 1 else start_layer + per_rank)
        return (start_layer, end_layer)

    # Explicit policy: a comma-separated list of per-rank layer counts
    # supplied via VLLM_PP_LAYER_PARTITION, e.g. "10,11,11".
    try:
        partitions = [int(part) for part in partition_list_str.split(",")]
    except ValueError as err:
        raise ValueError("Invalid partition string: {}".format(
            partition_list_str)) from err
    if len(partitions) != pp_size:
        raise ValueError(f"{len(partitions)=} does not match {pp_size=}.")
    if sum(partitions) != num_hidden_layers:
        raise ValueError(
            f"{sum(partitions)=} does not match {num_hidden_layers=}.")
    start_layer = sum(partitions[:pp_rank])
    return (start_layer, start_layer + partitions[pp_rank])


#! A tiny metadata "collective communication" layer built on a torch
# distributed Store: implements send/recv, broadcast, all-gather and barrier
# for small pickled objects. Data-plane (tensor) traffic should still go
# through NCCL-related objects.
@dataclasses.dataclass
class StatelessProcessGroup:
    """A dataclass to hold a metadata store, and the rank, world_size of the
    group. Only use it to communicate metadata between processes.
    For data-plane communication, create NCCL-related objects.
    """
    rank: int
    world_size: int

    # A key-value store shared by all ranks. Concrete backends include
    # TCPStore (multi-node), FileStore (single node, multi-process) and
    # HashStore (in-memory, for tests).
    store: torch._C._distributed_c10d.Store
    # Keys written by this process are deleted once older than this (1 hour).
    data_expiration_seconds: int = 3600

    # dst rank -> count of objects sent to that rank (used to build unique
    # per-message keys).
    send_dst_counter: Dict[int, int] = dataclasses.field(default_factory=dict)
    # src rank -> count of objects received from that rank.
    recv_src_counter: Dict[int, int] = dataclasses.field(default_factory=dict)
    # Number of broadcasts originated by this rank.
    broadcast_send_counter: int = 0
    # src rank -> count of broadcasts received from that rank.
    broadcast_recv_src_counter: Dict[int, int] = dataclasses.field(
        default_factory=dict)

    # Keys this process wrote, with write timestamps, oldest first.
    # NOTE: deque is not protected by a lock here; do not share one group
    # across threads without external synchronization.
    entries: Deque[Tuple[str, float]] = dataclasses.field(
        default_factory=deque)

    def __post_init__(self):
        """Initialize one zeroed counter per peer rank."""
        assert self.rank < self.world_size
        self.send_dst_counter = {i: 0 for i in range(self.world_size)}
        self.recv_src_counter = {i: 0 for i in range(self.world_size)}
        self.broadcast_recv_src_counter = {
            i: 0
            for i in range(self.world_size)
        }

    def send_obj(self, obj: Any, dst: int):
        """Send an object to a destination rank.

        The object is not pushed to the peer directly; it is pickled into
        the shared store under a unique key that the peer's `recv_obj`
        reads (and blocks on until it appears).
        """
        self.expire_data()  # opportunistically drop stale keys we own
        # BUGFIX: the key must include the sender rank. With only
        # (dst, counter), two different senders targeting the same
        # destination would write to the same key and overwrite each other,
        # since counters are tracked per (sender, dst) pair.
        key = f"send_to/{dst}/{self.rank}/{self.send_dst_counter[dst]}"
        self.store.set(key, pickle.dumps(obj))
        self.send_dst_counter[dst] += 1
        self.entries.append((key, time.time()))

    def expire_data(self):
        """Expire data that is older than `data_expiration_seconds` seconds."""
        while self.entries:
            # check the oldest entry
            key, timestamp = self.entries[0]
            if time.time() - timestamp > self.data_expiration_seconds:
                self.store.delete_key(key)
                self.entries.popleft()
            else:
                # Entries are appended in time order, so once the oldest
                # entry is fresh, all remaining entries are fresh too.
                break

    def recv_obj(self, src: int) -> Any:
        """Receive an object from a source rank.

        Blocks until the corresponding key is present in the store.
        NOTE: uses pickle; only use with trusted peers on a trusted network.
        """
        # Mirror of the key written by `send_obj` on rank `src`.
        obj = pickle.loads(
            self.store.get(
                f"send_to/{self.rank}/{src}/{self.recv_src_counter[src]}"))
        self.recv_src_counter[src] += 1
        return obj

    def broadcast_obj(self, obj: Optional[Any], src: int) -> Any:
        """Broadcast an object from a source rank to all other ranks.
        It does not clean up after all ranks have received the object.
        Use it for limited times, e.g., for initialization.
        """
        if self.rank == src:
            # Sender: publish under a key unique to (src, broadcast count).
            self.expire_data()
            key = (f"broadcast_from/{src}/"
                   f"{self.broadcast_send_counter}")
            self.store.set(key, pickle.dumps(obj))
            self.broadcast_send_counter += 1
            self.entries.append((key, time.time()))
            return obj
        else:
            # Receiver: block until the matching broadcast key appears.
            key = (f"broadcast_from/{src}/"
                   f"{self.broadcast_recv_src_counter[src]}")
            recv_obj = pickle.loads(self.store.get(key))
            self.broadcast_recv_src_counter[src] += 1
            return recv_obj

    def all_gather_obj(self, obj: Any) -> list[Any]:
        """All gather an object from all ranks.

        Implemented as world_size broadcasts, one per rank, in rank order.
        """
        gathered_objs = []
        for i in range(self.world_size):
            if i == self.rank:
                gathered_objs.append(obj)
                self.broadcast_obj(obj, src=self.rank)
            else:
                recv_obj = self.broadcast_obj(None, src=i)
                gathered_objs.append(recv_obj)
        return gathered_objs

    def barrier(self):
        """A barrier to synchronize all ranks.

        Every rank broadcasts once and waits for every other rank's
        broadcast, so no rank can leave before all ranks have arrived.
        """
        for i in range(self.world_size):
            if i == self.rank:
                self.broadcast_obj(None, src=self.rank)
            else:
                self.broadcast_obj(None, src=i)

    @staticmethod
    def create(
        host: str,
        port: int,
        rank: int,
        world_size: int,
        data_expiration_seconds: int = 3600,
    ) -> "StatelessProcessGroup":
        """A replacement for `torch.distributed.init_process_group` that does not
        pollute the global state.

        If we have process A and process B called `torch.distributed.init_process_group`
        to form a group, and then we want to form another group with process A, B, C,
        D, it is not possible in PyTorch, because process A and process B have already
        formed a group, and process C and process D cannot join that group. This
        function is a workaround for this issue.

        `torch.distributed.init_process_group` is a global call, while this function
        is a stateless call. It will return a `StatelessProcessGroup` object that can be
        used for exchanging metadata. With this function, process A and process B
        can call `StatelessProcessGroup.create` to form a group, and then process A, B,
        C, and D can call `StatelessProcessGroup.create` to form another group.
        """ # noqa
        # Rank 0 hosts the TCP store; all other ranks connect to it.
        store = TCPStore(
            host_name=host,
            port=port,
            world_size=world_size,
            is_master=(rank == 0),
        )

        return StatelessProcessGroup(
            rank=rank,
            world_size=world_size,
            store=store,
            data_expiration_seconds=data_expiration_seconds)
