
Commit c143273

Update vitdet_for_ladder_attention_share_pos_embed.py
1 parent 836a321 · commit c143273


PATH/core/models/backbones/vitdet_for_ladder_attention_share_pos_embed.py

Lines changed: 5 additions & 3 deletions
```diff
@@ -239,8 +239,10 @@ def forward(self, x, H, W):
             raise
 
         attn = attn.softmax(dim=-1)
-        _attn_mask = (torch.isinf(attn) + torch.isnan(attn))
-        attn = attn.masked_fill(_attn_mask, 0)
+        _inf_tensor = torch.full_like(attn, float('inf'))
+        _nan_tensor = torch.full_like(attn, float('nan'))
+        _attn_mask = (torch.eq(attn, _inf_tensor).int() + torch.eq(attn, _nan_tensor).int())
+        attn = attn.masked_fill(_attn_mask.bool(), 0)
 
         x = (attn @ v).transpose(1, 2).reshape(B_w, N_w, C)
         x = self.proj(x)
```
```diff
@@ -856,7 +858,7 @@ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
         out: (M, D)
     """
     assert embed_dim % 2 == 0
-    omega = np.arange(embed_dim // 2, dtype=np.float)
+    omega = np.arange(embed_dim // 2, dtype=np.float32)
     omega /= embed_dim / 2.
     omega = 1. / 10000**omega  # (D/2,)
```
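`np.float` was a deprecated alias for the builtin `float`; it was deprecated in NumPy 1.20 and removed in 1.24, so the old line raises `AttributeError` on current NumPy. A quick sketch of the fixed computation in isolation, with `embed_dim = 8` chosen arbitrarily for illustration:

```python
import numpy as np

# Frequency bands for 1-D sin-cos positional embeddings, as in the fixed code.
embed_dim = 8
omega = np.arange(embed_dim // 2, dtype=np.float32)  # np.float32 replaces the removed np.float
omega /= embed_dim / 2.
omega = 1. / 10000**omega  # (D/2,)
print(omega)  # [1.0, 0.1, 0.01, 0.001]: geometrically spaced frequencies
```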
