import numpy as np
import torch

def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed

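# Illustrative usage sketch (not part of the original module): for a ViT-style
# 14x14 patch grid with a 768-dim embedding, the call below would yield an
# array of shape (1 + 14*14, 768) when cls_token=True.
#
#   pos_embed = get_2d_sincos_pos_embed(768, 14, cls_token=True)
#   assert pos_embed.shape == (1 + 14 * 14, 768)
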
def get_2d_sincos_pos_embed_flexible(embed_dim, grid_size, cls_token=False):
    """
    grid_size: (height, width) tuple of the grid
    return:
    pos_embed: [grid_size[0]*grid_size[1], embed_dim] or [1+grid_size[0]*grid_size[1], embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size[0], dtype=np.float32)
    grid_w = np.arange(grid_size[1], dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size[0], grid_size[1]])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed

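# Illustrative sketch (assumed grid values): the flexible variant takes a
# rectangular (H, W) grid directly, e.g. a 64-by-8 spectrogram patch grid with
# a 512-dim embedding and no cls token.
#
#   pos_embed = get_2d_sincos_pos_embed_flexible(512, (64, 8), cls_token=False)
#   assert pos_embed.shape == (64 * 8, 512)
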
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of the dimensions to encode each grid axis
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb

def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0

    omega = np.arange(embed_dim // 2, dtype=float)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb

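# Quick sanity check of the 1-D encoding (illustrative only): each position m
# maps to [sin(m*omega_0), ..., sin(m*omega_{D/2-1}), cos(m*omega_0), ...,
# cos(m*omega_{D/2-1})] with omega_d = 1 / 10000**(2d/D), so position 0 encodes
# to D/2 zeros followed by D/2 ones.
#
#   emb = get_1d_sincos_pos_embed_from_grid(8, np.arange(4))
#   assert emb.shape == (4, 8)
#   assert np.allclose(emb[0], [0, 0, 0, 0, 1, 1, 1, 1])
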
def interpolate_pos_embed(model, checkpoint_model):
    if "pos_embed" in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model["pos_embed"]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) of the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) of the new position embedding
        new_size = int(num_patches**0.5)
        if orig_size != new_size:
            print(
                "Position interpolate from %dx%d to %dx%d"
                % (orig_size, orig_size, new_size, new_size)
            )
            # extra tokens (e.g. cls token) are kept unchanged
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(
                -1, orig_size, orig_size, embedding_size
            ).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens,
                size=(new_size, new_size),
                mode="bicubic",
                align_corners=False,
            )
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model["pos_embed"] = new_pos_embed

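# Illustrative usage sketch (hypothetical names and path): this helper is
# typically called on a checkpoint state_dict right before load_state_dict, so
# that a pos_embed pretrained at one square grid size is bicubically resized to
# the target model's grid, e.g.:
#
#   checkpoint = torch.load("pretrained.pth", map_location="cpu")
#   checkpoint_model = checkpoint["model"]
#   interpolate_pos_embed(model, checkpoint_model)  # edits checkpoint_model in place
#   model.load_state_dict(checkpoint_model, strict=False)
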
def interpolate_pos_embed_img2audio(model, checkpoint_model, orig_size, new_size):
    if "pos_embed" in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model["pos_embed"]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches

        # orig_size and new_size are (height, width) tuples supplied by the caller
        if orig_size != new_size:
            print(
                "Position interpolate from %dx%d to %dx%d"
                % (orig_size[0], orig_size[1], new_size[0], new_size[1])
            )
            # extra tokens (e.g. cls token) are kept unchanged
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(
                -1, orig_size[0], orig_size[1], embedding_size
            ).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens,
                size=(new_size[0], new_size[1]),
                mode="bicubic",
                align_corners=False,
            )
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model["pos_embed"] = new_pos_embed

def interpolate_pos_embed_audio(model, checkpoint_model, orig_size, new_size):
    if "pos_embed" in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model["pos_embed"]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        if orig_size != new_size:
            print(
                "Position interpolate from %dx%d to %dx%d"
                % (orig_size[0], orig_size[1], new_size[0], new_size[1])
            )
            # assumes a single extra (cls) token at index 0
            cls_token = pos_embed_checkpoint[:, 0, :].unsqueeze(1)
            pos_tokens = pos_embed_checkpoint[:, 1:, :]
            pos_tokens = pos_tokens.reshape(
                -1, orig_size[0], orig_size[1], embedding_size
            )
            # crop (rather than interpolate) along the second grid axis;
            # the first axis is assumed to match between orig_size and new_size
            pos_tokens = pos_tokens[:, :, : new_size[1], :]
            pos_tokens = pos_tokens.flatten(1, 2)
            new_pos_embed = torch.cat((cls_token, pos_tokens), dim=1)
            checkpoint_model["pos_embed"] = new_pos_embed

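# Illustrative sketch (assumed grid sizes): unlike the bicubic variants above,
# this function simply crops the second grid axis, e.g. shortening a (64, 8)
# checkpoint grid to a (64, 4) target grid while reusing the cls token as is:
#
#   interpolate_pos_embed_audio(model, checkpoint_model,
#                               orig_size=(64, 8), new_size=(64, 4))
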
def interpolate_patch_embed_audio(
    model,
    checkpoint_model,
    orig_channel,
    new_channel=1,
    kernel_size=(16, 16),
    stride=(16, 16),
    padding=(0, 0),
):
    if orig_channel != new_channel:
        if "patch_embed.proj.weight" in checkpoint_model:
            # collapse the input channels (e.g. RGB) of the patch-embedding
            # convolution into a single channel by summing the kernel weights
            new_proj_weight = torch.nn.Parameter(
                torch.sum(checkpoint_model["patch_embed.proj.weight"], dim=1).unsqueeze(1)
            )
            checkpoint_model["patch_embed.proj.weight"] = new_proj_weight
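# Illustrative usage sketch (assumed channel counts): when adapting an RGB-image
# checkpoint (3 input channels) to single-channel spectrogram input, the patch
# projection weight of shape (embed_dim, 3, 16, 16) is summed over the channel
# dimension to (embed_dim, 1, 16, 16):
#
#   interpolate_patch_embed_audio(model, checkpoint_model, orig_channel=3, new_channel=1)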
|