import logging

import numpy as np
import torch

logger = logging.getLogger(__name__)


def get_3d_sincos_pos_embed(embed_dim, grid_size, t_size, cls_token=False):
    """
    embed_dim: total embedding dimension (must be divisible by 4)
    grid_size: int of the grid height and width
    t_size: int of the temporal size
    return:
    pos_embed: [t_size*grid_size*grid_size, embed_dim] or
               [1+t_size*grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    assert embed_dim % 4 == 0
    # 3/4 of the channels encode the spatial position, 1/4 the temporal position.
    embed_dim_spatial = embed_dim // 4 * 3
    embed_dim_temporal = embed_dim // 4

    # Spatial embedding over the (grid_size x grid_size) patch grid.
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # w goes first
    grid = np.stack(grid, axis=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid)

    # Temporal embedding over the t_size frames (or tubelets).
    grid_t = np.arange(t_size, dtype=np.float32)
    pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t)

    # Broadcast both parts to [t_size, grid_size**2, dim] and concatenate them
    # along the channel dimension.
    pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :]
    pos_embed_temporal = np.repeat(pos_embed_temporal, grid_size**2, axis=1)
    pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :]
    pos_embed_spatial = np.repeat(pos_embed_spatial, t_size, axis=0)

    pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1)
    pos_embed = pos_embed.reshape([-1, embed_dim])  # [t_size*grid_size**2, embed_dim]

    if cls_token:
        # Prepend an all-zero embedding for the [CLS] token.
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed

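# Illustrative usage of get_3d_sincos_pos_embed (an editorial sketch, not part of
# the original module). It assumes a ViT-style video backbone with an 8-frame clip
# and a 14x14 patch grid; the width 768 is only an example:
#
#     pos = get_3d_sincos_pos_embed(768, grid_size=14, t_size=8, cls_token=True)
#     pos = torch.from_numpy(pos).float().unsqueeze(0)  # (1, 1 + 8*14*14, 768)
#     model.pos_embed.data.copy_(pos)
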
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or
               [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # Prepend an all-zero embedding for the [CLS] token.
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed

def get_1d_sincos_pos_embed(embed_dim, t_size, cls_token=False):
    """
    t_size: int of the temporal size
    return:
    pos_embed: [t_size, embed_dim] or [1+t_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_t = np.arange(t_size, dtype=np.float32)
    pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid_t)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed

def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """
    embed_dim: output dimension for each position
    grid: [2, 1, grid_size, grid_size] array of grid coordinates
    out: (grid_size*grid_size, embed_dim)
    """
    assert embed_dim % 2 == 0

    # Half of the channels encode one grid axis, the other half the other axis.
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb

def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    # Frequencies 1 / 10000^(2i/D) for i in [0, D/2), as in the Transformer
    # sinusoidal encoding.
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb

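# Worked example for get_1d_sincos_pos_embed_from_grid (editorial note, not part
# of the original module): with embed_dim=4 and pos=[0, 1, 2], the frequencies are
# omega = [1.0, 0.01], so out = [[0, 0], [1, 0.01], [2, 0.02]] and the result is
# [sin(out) | cos(out)] with shape (3, 4).
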
def interpolate_pos_embed(checkpoint_model, model, orig_t_size=4, pos_name='vision_encoder.pos_embed'):
    """
    Resize the positional embedding stored under `pos_name` in `checkpoint_model`
    so that it matches `model`: linear interpolation along the temporal axis first,
    then bicubic interpolation over the spatial grid.
    """
    if pos_name in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model[pos_name]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches

        # Target temporal size and the original/target spatial grid sizes.
        new_t_size = model.T
        orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // orig_t_size) ** 0.5)
        new_size = int((num_patches // new_t_size) ** 0.5)

        # Step 1: temporal interpolation.
        if orig_t_size != new_t_size:
            logger.info(f"Temporal interpolate from {orig_t_size} to {new_t_size} ({pos_name})")
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            # [1, T*HW, C] -> [HW, C, T] for 1-D interpolation along T.
            pos_tokens = pos_tokens.view(1, orig_t_size, -1, embedding_size)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, embedding_size, orig_t_size)
            pos_tokens = torch.nn.functional.interpolate(pos_tokens, size=new_t_size, mode='linear')
            # Back to [1, T'*HW, C].
            pos_tokens = pos_tokens.view(1, -1, embedding_size, new_t_size)
            pos_tokens = pos_tokens.permute(0, 3, 1, 2).reshape(1, -1, embedding_size)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model[pos_name] = new_pos_embed
            pos_embed_checkpoint = new_pos_embed

        # Step 2: spatial interpolation.
        if orig_size != new_size:
            logger.info(f"Position interpolate from {orig_size}x{orig_size} to {new_size}x{new_size} ({pos_name})")
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            # [1, T'*H*W, C] -> [T', C, H, W] for bicubic interpolation over (H, W).
            pos_tokens = pos_tokens.reshape(-1, new_t_size, orig_size, orig_size, embedding_size)
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            # Back to [1, T'*H'*W', C].
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, new_t_size, new_size, new_size, embedding_size)
            pos_tokens = pos_tokens.flatten(1, 3)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model[pos_name] = new_pos_embed

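# Illustrative call site for interpolate_pos_embed (an editorial sketch, not part
# of the original module). The checkpoint file name is an assumption, and real
# checkpoints may nest the state dict under an extra key; the default
# 'vision_encoder.pos_embed' key comes from the function signature above:
#
#     state_dict = torch.load("checkpoint.pth", map_location="cpu")
#     interpolate_pos_embed(state_dict, model, orig_t_size=4,
#                           pos_name='vision_encoder.pos_embed')
#     msg = model.load_state_dict(state_dict, strict=False)
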
def interpolate_pos_embed_internvideo2(checkpoint_model, model, orig_t_size=8):
    """
    Same temporal-then-spatial interpolation as `interpolate_pos_embed`, applied
    to the 'pos_embed' and 'clip_pos_embed' keys of an InternVideo2 checkpoint.
    """
    for pos_name in ['pos_embed', 'clip_pos_embed']:
        if pos_name in checkpoint_model:
            pos_embed_checkpoint = checkpoint_model[pos_name]
            embedding_size = pos_embed_checkpoint.shape[-1]
            num_patches = model.patch_embed.num_patches
            num_extra_tokens = model.pos_embed.shape[-2] - num_patches

            # Target temporal size and the original/target spatial grid sizes.
            new_t_size = model.num_frames // model.tubelet_size
            orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // orig_t_size) ** 0.5)
            new_size = int((num_patches // new_t_size) ** 0.5)

            # Step 1: temporal interpolation.
            if orig_t_size != new_t_size:
                logger.info(f"Temporal interpolate from {orig_t_size} to {new_t_size} ({pos_name})")
                extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
                pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
                pos_tokens = pos_tokens.view(1, orig_t_size, -1, embedding_size)
                pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, embedding_size, orig_t_size)
                pos_tokens = torch.nn.functional.interpolate(pos_tokens, size=new_t_size, mode='linear')
                pos_tokens = pos_tokens.view(1, -1, embedding_size, new_t_size)
                pos_tokens = pos_tokens.permute(0, 3, 1, 2).reshape(1, -1, embedding_size)
                new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
                checkpoint_model[pos_name] = new_pos_embed
                pos_embed_checkpoint = new_pos_embed

            # Step 2: spatial interpolation.
            if orig_size != new_size:
                logger.info(f"Position interpolate from {orig_size}x{orig_size} to {new_size}x{new_size} ({pos_name})")
                extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
                pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
                pos_tokens = pos_tokens.reshape(-1, new_t_size, orig_size, orig_size, embedding_size)
                pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
                pos_tokens = torch.nn.functional.interpolate(
                    pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
                pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, new_t_size, new_size, new_size, embedding_size)
                pos_tokens = pos_tokens.flatten(1, 3)
                new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
                checkpoint_model[pos_name] = new_pos_embed

    # Separate spatial/temporal embeddings are not handled here.
    if 'pos_embed_spatial' in checkpoint_model or 'pos_embed_temporal' in checkpoint_model:
        raise NotImplementedError

def interpolate_pos_embed_internvideo2_new(checkpoint_model, model, orig_t_size=8):
    """
    Like `interpolate_pos_embed_internvideo2`, but discovers every positional
    embedding key in the checkpoint (any key containing 'pos_embed' or
    'clip_pos_embed', except 'img_pos_embed') and interpolates each of them.
    """
    pos_names = []
    for k in checkpoint_model.keys():
        if ('pos_embed' in k or 'clip_pos_embed' in k) and 'img_pos_embed' not in k:
            pos_names.append(k)

    logger.info(f"pos names list for interpolating: {pos_names}")

    assert len(pos_names) > 0, checkpoint_model.keys()

    # Separate spatial/temporal embeddings are not handled here.
    if 'pos_embed_spatial' in checkpoint_model.keys() or 'pos_embed_temporal' in checkpoint_model.keys():
        raise NotImplementedError

    for pos_name in pos_names:
        pos_embed_checkpoint = checkpoint_model[pos_name]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches

        # Target temporal size and the original/target spatial grid sizes.
        new_t_size = model.num_frames // model.tubelet_size
        orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // orig_t_size) ** 0.5)
        new_size = int((num_patches // new_t_size) ** 0.5)

        # Step 1: temporal interpolation.
        if orig_t_size != new_t_size:
            logger.info(f"Temporal interpolate from {orig_t_size} to {new_t_size} ({pos_name})")
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.view(1, orig_t_size, -1, embedding_size)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, embedding_size, orig_t_size)
            pos_tokens = torch.nn.functional.interpolate(pos_tokens, size=new_t_size, mode='linear')
            pos_tokens = pos_tokens.view(1, -1, embedding_size, new_t_size)
            pos_tokens = pos_tokens.permute(0, 3, 1, 2).reshape(1, -1, embedding_size)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model[pos_name] = new_pos_embed
            pos_embed_checkpoint = new_pos_embed

        # Step 2: spatial interpolation.
        if orig_size != new_size:
            logger.info(f"Position interpolate from {orig_size}x{orig_size} to {new_size}x{new_size} ({pos_name})")
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, new_t_size, orig_size, orig_size, embedding_size)
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, new_t_size, new_size, new_size, embedding_size)
            pos_tokens = pos_tokens.flatten(1, 3)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model[pos_name] = new_pos_embed

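# ---------------------------------------------------------------------------
# Minimal smoke test (an editorial addition, not part of the original module).
# It only exercises the sin-cos table builders above; the interpolation helpers
# need a real checkpoint/model pair and are therefore not covered here.
if __name__ == "__main__":
    emb_1d = get_1d_sincos_pos_embed(embed_dim=64, t_size=8)
    emb_2d = get_2d_sincos_pos_embed(embed_dim=64, grid_size=14, cls_token=True)
    emb_3d = get_3d_sincos_pos_embed(embed_dim=64, grid_size=14, t_size=8)
    print(emb_1d.shape)  # (8, 64)
    print(emb_2d.shape)  # (197, 64): 1 cls token + 14*14 patches
    print(emb_3d.shape)  # (1568, 64): 8 frames * 14*14 patches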