import torch
import torch.nn as nn

from modules.build import GROUNDING_REGISTRY
from modules.layers.transformers import (TransformerDecoderLayer,
                                         TransformerEncoderLayer,
                                         TransformerSpatialDecoderLayer)
from modules.utils import layer_repeat, calc_pairwise_locs
from modules.weights import _init_weights_bert


@GROUNDING_REGISTRY.register()
class EntitySpatialCrossEncoder(nn.Module):
    """
       spatial_dim: spatial feature dim, used to modify attention
       dim_loc:
    """

    def __init__(self, cfg, hidden_size=768, num_attention_heads=12, spatial_dim=5, num_layers=4, dim_loc=6,
                 pairwise_rel_type='center'):
        super().__init__()
        decoder_layer = TransformerSpatialDecoderLayer(hidden_size, num_attention_heads, dim_feedforward=2048,
                                                       dropout=0.1, activation='gelu',
                                                       spatial_dim=spatial_dim, spatial_multihead=True,
                                                       spatial_attn_fusion='cond')
        self.layers = layer_repeat(decoder_layer, num_layers)
        loc_layer = nn.Sequential(
            nn.Linear(dim_loc, hidden_size),
            nn.LayerNorm(hidden_size),
        )
        self.loc_layers = layer_repeat(loc_layer, 1)
        self.pairwise_rel_type = pairwise_rel_type
        self.spatial_dim = spatial_dim
        self.spatial_dist_norm = True
        self.apply(_init_weights_bert)

    def forward(
            self, txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks,
            output_attentions=False, output_hidden_states=False, **kwargs
    ):
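        # Pairwise spatial relations between objects (assumes obj_locs packs a 3-D
        # center followed by a 3-D size); these bias attention in every spatial layer.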
        pairwise_locs = calc_pairwise_locs(
            obj_locs[:, :, :3], obj_locs[:, :, 3:],
            pairwise_rel_type=self.pairwise_rel_type
        )

        out_embeds = obj_embeds
        for i, layer in enumerate(self.layers):
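            # Re-inject the location embedding into the object tokens before each layer.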
            query_pos = self.loc_layers[0](obj_locs)
            out_embeds = out_embeds + query_pos

            out_embeds, self_attn_matrices, cross_attn_matrices = layer(
                out_embeds, txt_embeds, pairwise_locs,
                tgt_key_padding_mask=obj_masks.logical_not(),
                memory_key_padding_mask=txt_masks.logical_not(),
            )

        return txt_embeds, out_embeds


@GROUNDING_REGISTRY.register()
class UnifiedSpatialCrossEncoderV1(nn.Module):
    """
       spatial_dim: spatial feature dim, used to modify attention
       dim_loc:
    """

    def __init__(self, cfg, hidden_size=768, num_attention_heads=12, spatial_dim=5, num_layers=4, dim_loc=6,
                 pairwise_rel_type='center'):
        super().__init__()

        pc_encoder_layer = TransformerSpatialDecoderLayer(hidden_size, num_attention_heads, dim_feedforward=2048,
                                                          dropout=0.1, activation='gelu',
                                                          spatial_dim=spatial_dim, spatial_multihead=True,
                                                          spatial_attn_fusion='cond')
        lang_encoder_layer = TransformerDecoderLayer(hidden_size, num_attention_heads)
        self.pc_encoder = layer_repeat(pc_encoder_layer, num_layers)
        self.lang_encoder = layer_repeat(lang_encoder_layer, num_layers)

        loc_layer = nn.Sequential(
            nn.Linear(dim_loc, hidden_size),
            nn.LayerNorm(hidden_size),
        )
        self.loc_layers = layer_repeat(loc_layer, 1)

        self.pairwise_rel_type = pairwise_rel_type
        self.spatial_dim = spatial_dim
        self.spatial_dist_norm = True
        self.apply(_init_weights_bert)

    def forward(
            self, txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks,
            output_attentions=False, output_hidden_states=False, **kwargs
    ):
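        # Pairwise spatial relations between objects (assumes obj_locs packs a 3-D
        # center followed by a 3-D size); used as the spatial bias in the object stream.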
        pairwise_locs = calc_pairwise_locs(
            obj_locs[:, :, :3], obj_locs[:, :, 3:],
            pairwise_rel_type=self.pairwise_rel_type
        )

        for i, (pc_layer, lang_layer) in enumerate(zip(self.pc_encoder, self.lang_encoder)):
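            # Re-inject the location embedding, then run a two-stream update: the object
            # stream uses spatially biased self-attention and cross-attends to text, while
            # the text stream cross-attends back to the objects from before this update.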
            query_pos = self.loc_layers[0](obj_locs)
            obj_embeds = obj_embeds + query_pos

            obj_embeds_out, self_attn_matrices, cross_attn_matrices = pc_layer(
                obj_embeds, txt_embeds, pairwise_locs,
                tgt_key_padding_mask=obj_masks.logical_not(),
                memory_key_padding_mask=txt_masks.logical_not(),
            )

            txt_embeds_out, self_attn_matrices, cross_attn_matrices = lang_layer(
                txt_embeds, obj_embeds,
                tgt_key_padding_mask=txt_masks.logical_not(),
                memory_key_padding_mask=obj_masks.logical_not(),
            )

            obj_embeds = obj_embeds_out
            txt_embeds = txt_embeds_out

        return txt_embeds, obj_embeds


@GROUNDING_REGISTRY.register()
class UnifiedSpatialCrossEncoderV2(nn.Module):
    """
       spatial_dim: spatial feature dim, used to modify attention
       dim_loc:
    """

    def __init__(self, cfg, hidden_size=512, dim_feedforward=2048, num_attention_heads=12, num_layers=4, dim_loc=6):
        super().__init__()

        # unified encoder
        unified_encoder_layer = TransformerEncoderLayer(hidden_size, num_attention_heads, dim_feedforward=dim_feedforward)
        self.unified_encoder = layer_repeat(unified_encoder_layer, num_layers)
        
        # token-type embedding and object-feature projection; both outputs must match
        # the unified encoder width (hidden_size), otherwise the additions and the
        # joint self-attention below fail with a shape mismatch
        self.token_type_embeddings = nn.Embedding(2, hidden_size)
        self.pm_linear = nn.Linear(768, hidden_size)
        
        self.apply(_init_weights_bert)

    def forward(
            self, txt_embeds, txt_masks, obj_embeds,
            output_attentions=False, output_hidden_states=False, **kwargs
    ):
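        # This variant consumes no location information: text and object tokens are
        # tagged with token-type embeddings, concatenated, and fused by self-attention.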
        txt_len = txt_embeds.shape[1]
        obj_len = obj_embeds.shape[1]
        
        obj_embeds = self.pm_linear(obj_embeds)
        # dummy mask for objects (all valid)
        obj_masks = torch.ones((obj_embeds.shape[0], obj_len), dtype=torch.bool, device=txt_embeds.device)

        for i, unified_layer in enumerate(self.unified_encoder):
            # ----- Object embeddings -----
            # Only add token type embedding (no spatial loc)
            pc_token_type_ids = torch.ones_like(obj_masks, dtype=torch.long)
            pc_type_embeds = self.token_type_embeddings(pc_token_type_ids)
            obj_embeds = obj_embeds + pc_type_embeds

            # ----- Text embeddings -----
            lang_token_type_ids = torch.zeros_like(txt_masks, dtype=torch.long)
            lang_type_embeds = self.token_type_embeddings(lang_token_type_ids)
            txt_embeds = txt_embeds + lang_type_embeds

            # ----- Fuse modalities -----
            joint_embeds = torch.cat((txt_embeds, obj_embeds), dim=1)
            joint_masks = torch.cat((txt_masks, obj_masks), dim=1)

            # ----- Transformer layer -----
            # the key padding mask follows the convention of the encoders above:
            # True marks padded positions, so the validity mask is inverted
            joint_embeds, self_attn_matrices = unified_layer(
                joint_embeds,
                tgt_key_padding_mask=joint_masks.logical_not()
            )

            # ----- Split back -----
            txt_embeds, obj_embeds = torch.split(joint_embeds, [txt_len, obj_len], dim=1)

        return txt_embeds, obj_embeds



if __name__ == '__main__':
    # quick smoke test; cfg is unused by the constructor, and hidden_size must match
    # the 768-d text/object features built below
    x = UnifiedSpatialCrossEncoderV2(cfg=None, hidden_size=768).cuda()
    txt_embeds = torch.zeros((3, 10, 768)).cuda()
    txt_masks = torch.ones((3, 10), dtype=torch.bool).cuda()
    obj_embeds = torch.zeros((3, 10, 768)).cuda()
    obj_locs = torch.ones((3, 10, 6)).cuda()
    obj_masks = torch.ones((3, 10), dtype=torch.bool).cuda()
    # obj_locs/obj_masks are not used by V2's forward, so pass them via **kwargs
    x(txt_embeds, txt_masks, obj_embeds, obj_locs=obj_locs, obj_masks=obj_masks)
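
    # Minimal extra sketch (assumption: cfg may be None and the default constructor
    # widths match the 768-d dummy features above) exercising the spatial variant too.
    ent = EntitySpatialCrossEncoder(cfg=None).cuda()
    ent(txt_embeds, txt_masks, obj_embeds, obj_locs, obj_masks)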