import torch
from transformers import ViTModel, ViTConfig

# === Hyper-parameters ===
num_action_classes = 10  # number of action classes (adjust to the actual task)
batch_size = 2           # batch size
num_frames = 16          # frames sampled per video
image_size = 224         # input resolution (must match the ViT pretrained weights)

# === 1. Load the pretrained ViT backbone ===
# NOTE: `config` is kept for backward compatibility; it is identical to `model.config`.
config = ViTConfig.from_pretrained("google/vit-base-patch16-224-in21k")
model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k")
model.eval()  # disable dropout so per-frame features are deterministic

# === 2. Simulated input data ===
# Shape: [batch_size, num_frames, channels, height, width]
video_frames = torch.randn(batch_size, num_frames, 3, image_size, image_size)

# === 3. Extract per-frame features ===
temporal_features = []
# no_grad: pure feature extraction — avoid building 16 autograd graphs.
with torch.no_grad():
    for frame in range(num_frames):
        # Single-frame input (shape: [batch_size, channels, height, width])
        single_frame = video_frames[:, frame, :, :, :]

        # Run ViT (outputs.last_hidden_state shape: [batch_size, num_patches+1, hidden_size])
        outputs = model(single_frame)

        # Take the [CLS] token feature (global representation,
        # shape: [batch_size, hidden_size])
        cls_feature = outputs.last_hidden_state[:, 0, :]
        temporal_features.append(cls_feature)

# Stack along the temporal dimension (shape: [batch_size, num_frames, hidden_size])
temporal_features = torch.stack(temporal_features, dim=1)

# === 4. Temporal modeling (example: simple mean pooling) ===
# A Transformer/LSTM over the frame axis is a stronger alternative.
video_embedding = temporal_features.mean(dim=1)  # shape: [batch_size, hidden_size]

# === 5. Classification head ===
classifier = torch.nn.Linear(config.hidden_size, num_action_classes)

# === 6. Compute logits ===
logits = classifier(video_embedding)  # shape: [batch_size, num_action_classes]