@@ -70,9 +70,6 @@ def forward(self, predictions, priors, targets):
         if GPU:
             loc_t = loc_t.cuda()
             conf_t = conf_t.cuda()
-        # wrap targets
-        loc_t = Variable(loc_t, requires_grad=False)
-        conf_t = Variable(conf_t, requires_grad=False)
 
         pos = conf_t > 0
 
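Note: the `Variable` wrapping removed above became a no-op once `Variable` was merged into `Tensor` in PyTorch 0.4. A minimal sketch (hypothetical shapes) of why the wrapping is no longer needed:

```python
import torch

# Since PyTorch 0.4, torch.Tensor subsumes the old Variable type:
# freshly created tensors default to requires_grad=False, so the
# target buffers can feed the loss directly without any wrapping.
loc_t = torch.zeros(2, 8732, 4)                  # hypothetical box targets
conf_t = torch.zeros(2, 8732, dtype=torch.long)  # hypothetical label targets
assert not loc_t.requires_grad and not conf_t.requires_grad
```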
@@ -81,7 +78,7 @@ def forward(self, predictions, priors, targets):
         pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
         loc_p = loc_data[pos_idx].view(-1, 4)
         loc_t = loc_t[pos_idx].view(-1, 4)
-        loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)
+        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
 
         # Compute max conf across batch for hard negative mining
         batch_conf = conf_data.view(-1, self.num_classes)
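The `reduction='sum'` argument reproduces the behaviour of the deprecated `size_average=False` flag: both sum the per-element losses instead of averaging them. A quick sanity sketch with made-up shapes (the same substitution applies to the `F.cross_entropy` call in the next hunk):

```python
import torch
import torch.nn.functional as F

# reduction='sum' matches the old size_average=False behaviour:
# per-element losses are summed rather than averaged.
loc_p = torch.randn(8, 4)  # predicted offsets for 8 positive priors
loc_t = torch.randn(8, 4)  # matched ground-truth offsets
assert torch.allclose(
    F.smooth_l1_loss(loc_p, loc_t, reduction='sum'),
    F.smooth_l1_loss(loc_p, loc_t, reduction='none').sum(),
)
```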
@@ -101,7 +98,7 @@ def forward(self, predictions, priors, targets):
         neg_idx = neg.unsqueeze(2).expand_as(conf_data)
         conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
         targets_weighted = conf_t[(pos + neg).gt(0)]
-        loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)
+        loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
 
         # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
         N = max(num_pos.data.sum().float(), 1)
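For reference, the `max(..., 1)` clamp keeps the final normalization `L = (Lconf + αLloc) / N` safe when a batch contains no positive matches; a small sketch under that assumption:

```python
import torch

# With no positive priors in the batch, num_pos sums to zero and the
# clamp to 1 prevents a division by zero in the final loss.
num_pos = torch.zeros(2, 1)             # hypothetical: zero positives
N = max(num_pos.data.sum().float(), 1)
assert N == 1
```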