import copy

import torch
import torch.nn as nn

from modules.layers.transformers import (TransformerDecoderLayer,
                                         TransformerEncoderLayer,
                                         TransformerSpatialDecoderLayer)
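
# NOTE: the two helpers below are used by this module but were not imported in
# the original snippet. These are minimal sketches following the usual
# conventions for these names, standing in for the repo's own definitions.

def layer_repeat(module, n):
    """Clone `module` into n independent copies (no parameter sharing)."""
    return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])


def _init_weights_bert(module, std=0.02):
    """BERT-style init: normal(0, std) weights, zero biases, identity LayerNorm."""
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)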


class UnifiedSpatialCrossEncoderV2(nn.Module):
    """Single-stream encoder: text and object tokens are concatenated and
    processed jointly by a stack of self-attention layers.

    Args:
        cfg: config object (unused in this module, kept for interface parity).
        hidden_size: shared hidden dim of text and object tokens.
        dim_feedforward: FFN dim inside each encoder layer.
        num_attention_heads: attention heads per layer.
        num_layers: number of stacked unified encoder layers.
        dim_loc: dim of the raw object location vector (default 6,
            e.g. box center xyz plus box size).
    """

    def __init__(self, cfg, hidden_size=768, dim_feedforward=2048,
                 num_attention_heads=12, num_layers=4, dim_loc=6):
        super().__init__()

        # One self-attention encoder layer, cloned num_layers times and applied
        # to the concatenated [text; object] token sequence.
        unified_encoder_layer = TransformerEncoderLayer(
            hidden_size, num_attention_heads, dim_feedforward=dim_feedforward)
        self.unified_encoder = layer_repeat(unified_encoder_layer, num_layers)

        # Project raw object locations (dim_loc) into the hidden space so they
        # can be added to object embeddings as a positional signal.
        loc_layer = nn.Sequential(
            nn.Linear(dim_loc, hidden_size),
            nn.LayerNorm(hidden_size),
        )
        self.loc_layers = layer_repeat(loc_layer, 1)

        # Modality embeddings: id 0 = language token, id 1 = object token.
        self.token_type_embeddings = nn.Embedding(2, hidden_size)

        self.apply(_init_weights_bert)

    def forward(
        self, txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks,
        output_attentions=False, output_hidden_states=False, **kwargs
    ):
        txt_len = txt_embeds.shape[1]
        obj_len = obj_embeds.shape[1]

        for i, unified_layer in enumerate(self.unified_encoder):
            # Location and modality embeddings are re-injected before every
            # layer rather than only once at the input.
            query_pos = self.loc_layers[0](obj_locs)
            pc_token_type_ids = torch.ones(
                obj_embeds.shape[:2], dtype=torch.long, device=obj_embeds.device)
            pc_type_embeds = self.token_type_embeddings(pc_token_type_ids)
            obj_embeds = obj_embeds + query_pos + pc_type_embeds

            lang_token_type_ids = torch.zeros(
                txt_embeds.shape[:2], dtype=torch.long, device=txt_embeds.device)
            lang_type_embeds = self.token_type_embeddings(lang_token_type_ids)
            txt_embeds = txt_embeds + lang_type_embeds

            # Joint sequence: text tokens first, then object tokens.
            joint_embeds = torch.cat((txt_embeds, obj_embeds), dim=1)
            joint_masks = torch.cat((txt_masks, obj_masks), dim=1)

            # Masks mark valid tokens with True, so invert them to get the
            # key padding mask expected by the layer.
            joint_embeds, self_attn_matrices = unified_layer(
                joint_embeds, tgt_key_padding_mask=joint_masks.logical_not())

            # Split back so the per-modality embeddings feed the next layer.
            txt_embeds, obj_embeds = torch.split(
                joint_embeds, [txt_len, obj_len], dim=1)

        return txt_embeds, obj_embeds
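

# Minimal smoke test: a sketch added for illustration, not part of the original
# module. Tensor shapes and `cfg=None` are assumptions (cfg is unused above),
# and it presumes modules.layers.transformers is importable.
if __name__ == "__main__":
    B, T, O, H = 2, 16, 8, 768
    encoder = UnifiedSpatialCrossEncoderV2(cfg=None)
    txt_embeds = torch.randn(B, T, H)
    txt_masks = torch.ones(B, T, dtype=torch.bool)   # True = valid token
    obj_embeds = torch.randn(B, O, H)
    obj_locs = torch.randn(B, O, 6)                  # matches dim_loc=6
    obj_masks = torch.ones(B, O, dtype=torch.bool)
    txt_out, obj_out = encoder(txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks)
    print(txt_out.shape, obj_out.shape)              # (B, T, H) and (B, O, H)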