from collections import UserDict, defaultdict
from typing import (Any, Dict, List, Literal, Mapping, Sequence, Tuple,
                    TypedDict, TypeVar, Union, cast, final)

import numpy as np
import torch
import torch.types
from PIL.Image import Image
from typing_extensions import NotRequired, TypeAlias

from vllm.utils import JSONTree, is_list_of, json_map_leaves

_T = TypeVar("_T")

# yapf: disable
# NOTE: yapf formatting is disabled from here until the '# yapf: enable' line.
ImageItem: TypeAlias = Union[Image, np.ndarray, torch.Tensor]  # 1. multi-modal image input type
"""
A :class:`transformers.image_utils.ImageInput` representing a single image
item, which can be passed to a HuggingFace :code:`ImageProcessor`.
"""

VideoItem: TypeAlias = Union[                                  # 2. multi-modal video input type
    list[Image],
    np.ndarray,
    torch.Tensor,
    list[np.ndarray],
    list[torch.Tensor],
]
"""
A :class:`transformers.image_utils.VideoInput` representing a single video
item, which can be passed to a HuggingFace :code:`VideoProcessor`.
"""

AudioItem: TypeAlias = Union[                                  # 3. multi-modal audio input type
    np.ndarray,
    list[float],
    # `(audio, sampling_rate)`: If the audio's sampling rate is different
    # from that expected by the model, we need to resample it.
    tuple[np.ndarray, float],
]
"""
Represents a single audio
item, which can be passed to a HuggingFace :code:`AudioProcessor`.
"""
# yapf: enable

MultiModalData: TypeAlias = Union[_T, List[_T]]                # multi-modal data type
"""
Either a single data item, or a list of data items.

The number of data items allowed per modality is restricted by
:code:`--limit-mm-per-prompt`.
"""

# @final (like `final` in Java/C++) marks this class as one that must not be
# subclassed; here it keeps the set of built-in modality keys closed.
@final
class MultiModalDataBuiltins(TypedDict, total=False):
    # TypedDict defines a dict with a fixed set of keys and value types.
    # total=False means that every key declared below is optional.
    """Type annotations for modality types predefined by vLLM."""

    image: MultiModalData[ImageItem]   # equivalent to Union[ImageItem, List[ImageItem]]
    """The input image(s)."""

    video: MultiModalData[VideoItem]
    """The input video(s)."""

    audio: MultiModalData[AudioItem]
    """The input audio(s)."""

# The generic multi-modal input dictionary passed alongside a prompt:
# modality name -> one item, or a list of items, for that modality.
MultiModalDataDict: TypeAlias = Mapping[str, MultiModalData[Any]]
"""
A dictionary containing an entry for each modality type to input.

Note:
    This dictionary also accepts modality keys defined outside
    :class:`MultiModalDataBuiltins` as long as a customized plugin
    is registered through the :class:`~vllm.multimodal.MULTIMODAL_REGISTRY`.
    Read more on that :ref:`here <adding-multimodal-plugin>`.
"""

# Location information of multi-modal placeholder tokens within a prompt.
class PlaceholderRange(TypedDict):
    """
    Placeholder location information for multi-modal data.

    For example:
        Prompt: AAAA BBBB What is in these images?
        Images A and B will have:
            A: { "offset": 0, "length": 4 }
            B: { "offset": 5, "length": 4 }
    """

    offset: int
    """The start index of the placeholder in the prompt."""

    length: int
    """The length of the placeholder."""


# List["NestedTensors"] is a forward reference: it lets the alias refer to
# itself before the name is fully defined, so arbitrarily deep nesting works.
NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor,
                      Tuple[torch.Tensor, ...]]  # variable-length tuple of tensors
"""
Uses a list instead of a tensor if the dimensions of each element do not match.
"""

BatchedTensorInputs: TypeAlias = Dict[str, NestedTensors]
"""
A dictionary containing nested tensors which have been batched via
:meth:`MultiModalKwargs.batch`.
"""


# UserDict wraps a plain dict and exposes the full dict interface; it is
# designed to be subclassed and extended, which a raw dict subclass handles
# poorly.
class MultiModalKwargs(UserDict[str, NestedTensors]):
    """
    A dictionary that represents the keyword arguments to
    :meth:`~torch.nn.Module.forward`.
    """

    @staticmethod
    def _try_stack(nested_tensors: NestedTensors) -> NestedTensors:
        """
        Stack the inner dimensions that have the same shape in
        a nested list of tensors.

        Thus, a dimension represented by a list means that the inner
        dimensions are different for each element along that dimension.
        """
        if isinstance(nested_tensors, torch.Tensor):
            return nested_tensors

        # TODO: Remove these once all models have been migrated
        if isinstance(nested_tensors, np.ndarray):
            return torch.from_numpy(nested_tensors)
        if isinstance(nested_tensors, (int, float)):
            return torch.tensor(nested_tensors)

        stacked = [MultiModalKwargs._try_stack(t) for t in nested_tensors]
        if not is_list_of(stacked, torch.Tensor, check="all"):
            # Only tensors (not lists) can be stacked.
            return stacked

        tensors_ = cast(List[torch.Tensor], stacked)
        if len(tensors_) == 0:
            # `is_list_of` is vacuously True for an empty list, and
            # `torch.stack` raises on an empty sequence; keep the empty
            # list as-is instead of crashing.
            return tensors_

        if any(t.shape != tensors_[0].shape for t in tensors_):
            # The tensors have incompatible shapes and can't be stacked.
            return tensors_

        return torch.stack(tensors_)

    @staticmethod
    def batch(inputs_list: List["MultiModalKwargs"]) -> BatchedTensorInputs:
        """
        Batch multiple inputs together into a dictionary.

        The resulting dictionary has the same keys as the inputs.
        If the corresponding value from each input is a tensor and they all
        share the same shape, the output value is a single batched tensor;
        otherwise, the output value is a list containing the original value
        from each input.
        """
        if len(inputs_list) == 0:
            return {}

        # We need to consider the case where each item in the batch
        # contains different modalities (i.e. different keys).
        item_lists: Dict[str, List[NestedTensors]] = defaultdict(list)

        for inputs in inputs_list:
            for k, v in inputs.items():
                item_lists[k].append(v)

        return {
            k: MultiModalKwargs._try_stack(item_list)
            for k, item_list in item_lists.items()
        }

    @staticmethod
    def as_kwargs(
        batched_inputs: BatchedTensorInputs,
        *,
        device: torch.types.Device,
    ) -> BatchedTensorInputs:
        """
        Move every tensor leaf of ``batched_inputs`` to ``device``
        (non-blocking), preserving the nested structure.
        """
        json_inputs = cast(JSONTree[torch.Tensor], batched_inputs)

        json_mapped = json_map_leaves(
            lambda x: x.to(device, non_blocking=True),
            json_inputs,
        )

        return cast(BatchedTensorInputs, json_mapped)


MultiModalPlaceholderDict = Mapping[str, Sequence[PlaceholderRange]]
"""
A dictionary containing placeholder ranges.
"""

"""
# TODO: delete this
#! 返回数据的三个重要信息
inputs = tokenizer("This is a short sentence.", 
                padding="max_length", max_length=10, return_tensors="pt")

input_ids：     表示输入文本分割成的 token 的 ID。          e.g. tensor([[  101, 2023, 2003, 1037, 3231, 2000, 2054,  102]])
token_type_ids：用于区分输入文本中的不同句子或段落。          e.g. tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]])
attention_mask：用于指示哪些 token 是真实的文本，哪些是填充。 e.g. tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]) 0 表示填充的部分不用计算注意力
"""
class MultiModalInputsV2(TypedDict):
    """
    Represents the outputs of :class:`vllm.multimodal.MultiModalProcessor`,
    ready to be passed to vLLM internals.
    """

    type: Literal["multimodal"]
    """The type of inputs."""

    prompt: str
    """The processed prompt text."""

    prompt_token_ids: List[int]
    """The processed token IDs which includes placeholder tokens."""

    token_type_ids: NotRequired[List[int]]
    """The token type IDs of the prompt."""

    mm_kwargs: MultiModalKwargs
    """Keyword arguments to be directly passed to the model after batching."""

    mm_hashes: NotRequired[List[str]]
    """The hashes of the multi-modal data."""

    mm_placeholders: MultiModalPlaceholderDict
    """
    For each modality, information about the placeholder tokens in
    :code:`prompt_token_ids`.
    """