# backup/modules/layers/transformers.py
from typing import Optional
import einops
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from modules.utils import get_activation_fn
class CrossAttentionLayer(nn.Module):
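    """Cross-attention block followed by a feed-forward sub-layer.

    With ``prenorm=True`` (the default) LayerNorm is applied before each sub-layer;
    otherwise it is applied after the residual connection. ``norm2`` and ``dropout1``
    are created but not used in ``forward``. Returns the updated target features
    together with the cross-attention weights.
    """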
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu",
k_dim=None, v_dim=None, prenorm=True):
super().__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.prenorm = prenorm
self.multihead_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout, batch_first=True, kdim=k_dim, vdim=v_dim
)
        # Feed-forward sub-layer
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = get_activation_fn(activation)
def forward(
self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
):
tgt2 = tgt
if self.prenorm:
tgt2 = self.norm1(tgt2)
tgt2, cross_attn_matrices = self.multihead_attn(
query=tgt2, key=memory,
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask
)
tgt = tgt + self.dropout2(tgt2)
if not self.prenorm:
tgt = self.norm1(tgt)
        # feed-forward sub-layer: use the post-attention features, normalizing first when prenorm
        tgt2 = tgt
        if self.prenorm:
            tgt2 = self.norm3(tgt2)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
if not self.prenorm:
tgt = self.norm3(tgt)
return tgt, cross_attn_matrices
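
# Example usage for CrossAttentionLayer (illustrative sketch only; the tensor
# sizes below are assumptions, not taken from this repository):
#   layer = CrossAttentionLayer(d_model=256, nhead=8)
#   tgt = torch.randn(2, 10, 256)      # (batch, num_queries, d_model)
#   memory = torch.randn(2, 50, 256)   # (batch, num_memory_tokens, d_model)
#   out, cross_attn = layer(tgt, memory)
#   # out: (2, 10, 256); cross_attn: (2, 10, 50) head-averaged attention weights
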
class TransformerDecoderLayer(nn.Module):
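    """Pre-norm transformer decoder layer.

    Runs self-attention over the target, cross-attention to the memory, then a
    feed-forward sub-layer, each with a residual connection. Returns the output
    together with the self- and cross-attention matrices.
    """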
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super().__init__()
self.self_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout, batch_first=True
)
self.multihead_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout, batch_first=True
)
        # Feed-forward sub-layer
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = get_activation_fn(activation)
def forward(
self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
):
tgt2 = self.norm1(tgt)
tgt2, self_attn_matrices = self.self_attn(
query=tgt2, key=tgt2, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask
)
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2, cross_attn_matrices = self.multihead_attn(
query=tgt2, key=memory,
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask
)
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt, self_attn_matrices, cross_attn_matrices
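
# Example usage for TransformerDecoderLayer (illustrative sketch; the tensor
# sizes are assumptions, not taken from this repository):
#   layer = TransformerDecoderLayer(d_model=256, nhead=8)
#   tgt = torch.randn(2, 10, 256)      # (batch, num_queries, d_model)
#   memory = torch.randn(2, 50, 256)   # (batch, num_memory_tokens, d_model)
#   out, self_attn, cross_attn = layer(tgt, memory)
#   # out: (2, 10, 256); self_attn: (2, 10, 10); cross_attn: (2, 10, 50)
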
class TransformerEncoderLayer(nn.Module):
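    """Transformer encoder layer with a self-attention and a feed-forward sub-layer.

    ``prenorm=True`` applies LayerNorm before each sub-layer; the default
    (``prenorm=False``) applies it after the residual connection.
    """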
    def __init__(self, d_model, nhead, dim_feedforward=2048, batch_first=True,
                 dropout=0.1, activation="relu", prenorm=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout, batch_first=batch_first
)
        # Feed-forward sub-layer
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = get_activation_fn(activation)
self.prenorm = prenorm
def forward(
self, tgt, tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
):
tgt2 = tgt
if self.prenorm:
tgt2 = self.norm1(tgt2)
tgt2, self_attn_matrices = self.self_attn(
query=tgt2, key=tgt2, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask
)
tgt = tgt + self.dropout1(tgt2)
if not self.prenorm:
tgt = self.norm1(tgt)
if self.prenorm:
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout2(tgt2)
if not self.prenorm:
tgt = self.norm2(tgt)
return tgt, self_attn_matrices
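
# Example usage for TransformerEncoderLayer (illustrative sketch; the tensor
# sizes are assumptions, not taken from this repository):
#   layer = TransformerEncoderLayer(d_model=256, nhead=8)
#   x = torch.randn(2, 10, 256)        # (batch, num_tokens, d_model)
#   out, self_attn = layer(x)
#   # out: (2, 10, 256); self_attn: (2, 10, 10) head-averaged attention weights
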
class MultiHeadAttentionSpatial(nn.Module):
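    """Multi-head attention that fuses content attention with pairwise spatial cues.

    ``pairwise_locs`` is expected to have shape (batch, query_len, key_len, spatial_dim).
    ``spatial_attn_fusion`` selects how the location scores are combined with the
    content scores: 'mul', 'bias' and 'add' project the pairwise locations to
    per-head scores, 'ctx' projects them into the key space and dots them with the
    query, and 'cond' predicts per-query spatial weights from the query features.
    The ``dropout`` and ``txt_embeds`` arguments are accepted for interface
    compatibility but are not used inside this module.
    """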
def __init__(
self, d_model, n_head, dropout=0.1, spatial_multihead=True, spatial_dim=5,
spatial_attn_fusion='mul',
):
super().__init__()
assert d_model % n_head == 0, 'd_model: %d, n_head: %d' % (d_model, n_head)
self.n_head = n_head
self.d_model = d_model
self.d_per_head = d_model // n_head
self.spatial_multihead = spatial_multihead
self.spatial_dim = spatial_dim
self.spatial_attn_fusion = spatial_attn_fusion
self.w_qs = nn.Linear(d_model, d_model)
self.w_ks = nn.Linear(d_model, d_model)
self.w_vs = nn.Linear(d_model, d_model)
self.fc = nn.Linear(d_model, d_model)
self.spatial_n_head = n_head if spatial_multihead else 1
if self.spatial_attn_fusion in ['mul', 'bias', 'add']:
self.pairwise_loc_fc = nn.Linear(spatial_dim, self.spatial_n_head)
elif self.spatial_attn_fusion == 'ctx':
self.pairwise_loc_fc = nn.Linear(spatial_dim, d_model)
elif self.spatial_attn_fusion == 'cond':
self.lang_cond_fc = nn.Linear(d_model, self.spatial_n_head * (spatial_dim + 1))
else:
raise NotImplementedError('unsupported spatial_attn_fusion %s' % (self.spatial_attn_fusion))
def forward(self, q, k, v, pairwise_locs, key_padding_mask=None, txt_embeds=None):
residual = q
q = einops.rearrange(self.w_qs(q), 'b l (head k) -> head b l k', head=self.n_head)
k = einops.rearrange(self.w_ks(k), 'b t (head k) -> head b t k', head=self.n_head)
v = einops.rearrange(self.w_vs(v), 'b t (head v) -> head b t v', head=self.n_head)
attn = torch.einsum('hblk,hbtk->hblt', q, k) / np.sqrt(q.shape[-1])
if self.spatial_attn_fusion in ['mul', 'bias', 'add']:
loc_attn = self.pairwise_loc_fc(pairwise_locs)
loc_attn = einops.rearrange(loc_attn, 'b l t h -> h b l t')
if self.spatial_attn_fusion == 'mul':
loc_attn = F.relu(loc_attn)
if not self.spatial_multihead:
loc_attn = einops.repeat(loc_attn, 'h b l t -> (h nh) b l t', nh=self.n_head)
elif self.spatial_attn_fusion == 'ctx':
loc_attn = self.pairwise_loc_fc(pairwise_locs)
loc_attn = einops.rearrange(loc_attn, 'b l t (h k) -> h b l t k', h=self.n_head)
loc_attn = torch.einsum('hblk,hbltk->hblt', q, loc_attn) / np.sqrt(q.shape[-1])
elif self.spatial_attn_fusion == 'cond':
spatial_weights = self.lang_cond_fc(residual)
spatial_weights = einops.rearrange(spatial_weights, 'b l (h d) -> h b l d', h=self.spatial_n_head,
d=self.spatial_dim + 1)
if self.spatial_n_head == 1:
spatial_weights = einops.repeat(spatial_weights, '1 b l d -> h b l d', h=self.n_head)
spatial_bias = spatial_weights[..., :1]
spatial_weights = spatial_weights[..., 1:]
loc_attn = torch.einsum('hbld,bltd->hblt', spatial_weights, pairwise_locs) + spatial_bias
loc_attn = torch.sigmoid(loc_attn)
if key_padding_mask is not None:
mask = einops.repeat(key_padding_mask, 'b t -> h b l t', h=self.n_head, l=q.size(2))
attn = attn.masked_fill(mask, -np.inf)
if self.spatial_attn_fusion in ['mul', 'cond']:
loc_attn = loc_attn.masked_fill(mask, 0)
else:
loc_attn = loc_attn.masked_fill(mask, -np.inf)
if self.spatial_attn_fusion == 'add':
fused_attn = (torch.softmax(attn, 3) + torch.softmax(loc_attn, 3)) / 2
else:
if self.spatial_attn_fusion in ['mul', 'cond']:
fused_attn = torch.log(torch.clamp(loc_attn, min=1e-6)) + attn
else:
fused_attn = loc_attn + attn
fused_attn = torch.softmax(fused_attn, 3)
        # guard against NaNs, e.g. from rows that are fully masked before the softmax
        assert not torch.isnan(fused_attn).any(), 'NaN in fused attention weights'
output = torch.einsum('hblt,hbtv->hblv', fused_attn, v)
output = einops.rearrange(output, 'head b l v -> b l (head v)')
output = self.fc(output)
return output, fused_attn
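
# Example usage for MultiHeadAttentionSpatial (illustrative sketch; the tensor
# sizes are assumptions, not taken from this repository):
#   attn = MultiHeadAttentionSpatial(d_model=256, n_head=8, spatial_dim=5,
#                                    spatial_attn_fusion='mul')
#   x = torch.randn(2, 10, 256)                  # (batch, num_objects, d_model)
#   pairwise_locs = torch.randn(2, 10, 10, 5)    # (batch, num_objects, num_objects, spatial_dim)
#   out, fused_attn = attn(x, x, x, pairwise_locs)
#   # out: (2, 10, 256); fused_attn: (8, 2, 10, 10), one attention map per head
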
class TransformerSpatialDecoderLayer(TransformerDecoderLayer):
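    """TransformerDecoderLayer whose self-attention is replaced by MultiHeadAttentionSpatial.

    ``tgt_pairwise_locs`` supplies the pairwise spatial features for the target tokens.
    """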
def __init__(
self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu",
spatial_multihead=True, spatial_dim=5, spatial_attn_fusion='mul'
):
super().__init__(
d_model, nhead, dim_feedforward=dim_feedforward, dropout=dropout, activation=activation
)
del self.self_attn
self.self_attn = MultiHeadAttentionSpatial(
d_model, nhead, dropout=dropout,
spatial_multihead=spatial_multihead,
spatial_dim=spatial_dim,
spatial_attn_fusion=spatial_attn_fusion,
)
def forward(
self, tgt, memory,
tgt_pairwise_locs: Optional[Tensor] = None,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
):
tgt2 = self.norm1(tgt)
tgt2, self_attn_matrices = self.self_attn(
tgt2, tgt2, tgt2, tgt_pairwise_locs,
key_padding_mask=tgt_key_padding_mask
)
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2, cross_attn_matrices = self.multihead_attn(
query=tgt2, key=memory,
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask
)
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt, self_attn_matrices, cross_attn_matrices
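
# Example usage for TransformerSpatialDecoderLayer (illustrative sketch; the
# tensor sizes are assumptions, not taken from this repository):
#   layer = TransformerSpatialDecoderLayer(d_model=256, nhead=8, spatial_dim=5)
#   tgt = torch.randn(2, 10, 256)
#   memory = torch.randn(2, 50, 256)
#   tgt_pairwise_locs = torch.randn(2, 10, 10, 5)
#   out, self_attn, cross_attn = layer(tgt, memory, tgt_pairwise_locs=tgt_pairwise_locs)
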
class TransformerSpatialEncoderLayer(TransformerEncoderLayer):
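    """TransformerEncoderLayer whose self-attention is replaced by MultiHeadAttentionSpatial."""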
def __init__(
self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu",
spatial_multihead=True, spatial_dim=5, spatial_attn_fusion='mul'
):
super().__init__(
d_model, nhead, dim_feedforward=dim_feedforward, dropout=dropout, activation=activation
)
del self.self_attn
self.self_attn = MultiHeadAttentionSpatial(
d_model, nhead, dropout=dropout,
spatial_multihead=spatial_multihead,
spatial_dim=spatial_dim,
spatial_attn_fusion=spatial_attn_fusion,
)
def forward(
self, tgt, tgt_pairwise_locs,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
):
tgt2 = tgt
tgt2, self_attn_matrices = self.self_attn(
tgt2, tgt2, tgt2, tgt_pairwise_locs,
key_padding_mask=tgt_key_padding_mask
)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
return tgt, self_attn_matrices
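
# Example usage for TransformerSpatialEncoderLayer (illustrative sketch; the
# tensor sizes are assumptions, not taken from this repository):
#   layer = TransformerSpatialEncoderLayer(d_model=256, nhead=8, spatial_dim=5)
#   x = torch.randn(2, 10, 256)
#   pairwise_locs = torch.randn(2, 10, 10, 5)
#   out, self_attn = layer(x, pairwise_locs)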