import torch
import torch.nn as nn

from modules.layers.transformers import (TransformerDecoderLayer,
                                         TransformerEncoderLayer,
                                         TransformerSpatialDecoderLayer)
# NOTE: layer_repeat and _init_weights_bert are project helpers used below;
# their import statement is not shown in this snippet.

class UnifiedSpatialCrossEncoderV2(nn.Module):
    """
       spatial_dim: spatial feature dim, used to modify attention
       dim_loc:
    """

    def __init__(self, cfg, hidden_size=768, dim_feedforward=2048, num_attention_heads=12, num_layers=4, dim_loc=6):
        super().__init__()

        # unified encoder: shared self-attention over the joint text + object sequence
        unified_encoder_layer = TransformerEncoderLayer(hidden_size, num_attention_heads, dim_feedforward=dim_feedforward)
        self.unified_encoder = layer_repeat(unified_encoder_layer, num_layers)

        # location layer: projects raw object locations into the hidden space
        loc_layer = nn.Sequential(
            nn.Linear(dim_loc, hidden_size),
            nn.LayerNorm(hidden_size),
        )
        self.loc_layers = layer_repeat(loc_layer, 1)

        # token type embedding: 0 = language token, 1 = object token
        self.token_type_embeddings = nn.Embedding(2, hidden_size)

        self.apply(_init_weights_bert)

    def forward(
            self, txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks,
            output_attentions=False, output_hidden_states=False, **kwargs
    ):
        # txt_masks / obj_masks are True for valid tokens and False for padding
        txt_len = txt_embeds.shape[1]
        obj_len = obj_embeds.shape[1]

        for i, unified_layer in enumerate(self.unified_encoder):
            # add location and token-type embeddings to the object tokens
            # (re-injected before every layer)
            query_pos = self.loc_layers[0](obj_locs)
            pc_token_type_ids = torch.ones(obj_embeds.shape[:2], dtype=torch.long,
                                           device=obj_embeds.device)
            pc_type_embeds = self.token_type_embeddings(pc_token_type_ids)
            obj_embeds = obj_embeds + query_pos + pc_type_embeds

            # add token-type embeddings to the language tokens
            lang_token_type_ids = torch.zeros(txt_embeds.shape[:2], dtype=torch.long,
                                              device=txt_embeds.device)
            lang_type_embeds = self.token_type_embeddings(lang_token_type_ids)
            txt_embeds = txt_embeds + lang_type_embeds

            # concatenate text and object tokens into one joint sequence
            joint_embeds = torch.cat((txt_embeds, obj_embeds), dim=1)
            joint_masks = torch.cat((txt_masks, obj_masks), dim=1)

            # joint self-attention; the padding mask is True where a token
            # should be ignored, hence the logical_not of the validity masks
            joint_embeds, self_attn_matrices = unified_layer(
                joint_embeds, tgt_key_padding_mask=joint_masks.logical_not())

            # split the joint sequence back into text and object streams
            txt_embeds, obj_embeds = torch.split(joint_embeds, [txt_len, obj_len], dim=1)

        return txt_embeds, obj_embeds
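

# Minimal usage sketch for shape checking only (assumptions: cfg is unused by
# the constructor shown above, so None is passed; batch of 2, 10 text tokens,
# 8 object tokens, the default hidden_size=768 and dim_loc=6). This is not the
# project's actual training or inference entry point.
if __name__ == "__main__":
    encoder = UnifiedSpatialCrossEncoderV2(cfg=None)

    txt_embeds = torch.randn(2, 10, 768)                # language token features
    txt_masks = torch.ones(2, 10, dtype=torch.bool)     # True = valid token
    obj_embeds = torch.randn(2, 8, 768)                 # object token features
    obj_locs = torch.randn(2, 8, 6)                     # raw object locations
    obj_masks = torch.ones(2, 8, dtype=torch.bool)

    txt_out, obj_out = encoder(txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks)
    print(txt_out.shape, obj_out.shape)                 # (2, 10, 768), (2, 8, 768)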