import torch

from dataclasses import dataclass


# Input multi-view images together with their corresponding intrinsic and extrinsic camera parameters
@dataclass
class InputFrameData:
    """
    Per-camera input data; each entry along the first dimension corresponds
    to one camera. This struct alone does not encode which images are
    observing the same hand — InputFrameDesc is used to assemble
    features observing the same hands.

    * left_images (shape [n_images, h, w]): single-channel images, one per
        camera view (presumably grayscale left-camera frames — confirm with caller)
    * intrinsics (shape [n_images, 3, 3]): per-camera intrinsic matrices
    * extrinsics_xf (shape [n_images, 4, 4]): per-camera extrinsic transforms
        (NOTE(review): world-to-camera vs camera-to-world convention not
        visible here — verify against usage)
    """

    # Images, indexed in lockstep with intrinsics / extrinsics_xf.
    left_images: torch.Tensor
    # Camera intrinsic matrices, [n_images, 3, 3].
    intrinsics: torch.Tensor
    # Camera extrinsic 4x4 transforms, [n_images, 4, 4].
    extrinsics_xf: torch.Tensor


# View slicing: sample_range selects the subset of multi-camera inputs used for single-/multi-view fusion.
# Temporal tracking: (memory_idx, use_memory) control how per-hand state is carried across frames.
# Left/right handling: hand_idx marks the hand side so the network only needs to learn the "left hand";
# right-hand results are obtained by simple mirroring.
# Per-frame data descriptions; could potentially
# create another struct InputFrameDescription
@dataclass
class InputFrameDesc:
    """
    Descriptions for InputFrameData.

    bs: batch_size (number of hand samples described).

    * sample_range (shape [bs, 2]): the 2 columns are the starting and
        ending indices. Example: a tensor [[0, 2], [2, 3]] means first sample
        corresponds to left_images[0:2] which is a multi-view sample and second
        sample corresponds to left_images[2:3] which is a single-view sample
    * memory_idx (shape [bs]): only applicable with a valid _temporal field. In
        run-time if we have tracking for 2 hands, this tensor could be [0, 1].
        If the next frame left hand loses track, this memory_idx could become [1] tensor
    * use_memory (shape [bs]): a boolean tensor indicating whether to use the memory
        features for this sample
    * hand_idx (shape [bs]): hand index for each sample. There is a chance to factor this out.
    """

    # [bs, 2] start/end indices slicing the per-camera tensors in InputFrameData.
    sample_range: torch.Tensor
    # [bs] index into temporal memory slots; see docstring example.
    memory_idx: torch.Tensor
    # [bs] bool; whether to read memory features for each sample.
    use_memory: torch.Tensor
    # [bs] hand (left/right) index per sample.
    hand_idx: torch.Tensor


# Predefined skeleton data
@dataclass
class InputSkeletonData:
    """
    Predefined hand skeleton data (per-sample joint parameterization).

    * joint_rotation_axes (shape [bs, 22, 3]): rotation axis for each of
        the 22 joints
    * joint_rest_positions (shape [bs, 22, 3]): position of each of the 22
        joints in the rest pose
    """

    # [bs, 22, 3] per-joint rotation axes.
    joint_rotation_axes: torch.Tensor
    # [bs, 22, 3] per-joint rest-pose positions.
    joint_rest_positions: torch.Tensor
