Commit aed8d14

Author: lijc08
Commit message: fix
Parent: fd645b9


test.py

Lines changed: 10 additions & 6 deletions
@@ -15,15 +15,15 @@
 parser = argparse.ArgumentParser(description='FaceBoxes')

 # parser.add_argument('-m', '--trained_model', default='models_2019-04-17-17:59:00/FaceBoxes_epoch_9.pth', type=str)
-parser.add_argument('-m', '--trained_model', default='weights/FaceBoxes_epoch_295.pth', type=str)
+parser.add_argument('-m', '--trained_model', default='weights_r730/FaceBoxes_epoch_150.pth', type=str)
 parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results')
 parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')
 parser.add_argument('--cpu', default=False, type=bool, help='Use cpu nms')
 parser.add_argument('--dataset', default='FDDB', type=str, choices=['AFW', 'PASCAL', 'FDDB'], help='dataset')
 parser.add_argument('--confidence_threshold', default=0.05, type=float, help='confidence_threshold')
-parser.add_argument('--top_k', default=5000, type=int, help='top_k')
-parser.add_argument('--nms_threshold', default=0.3, type=float, help='nms_threshold')
-parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
+parser.add_argument('--top_k', default=400, type=int, help='top_k')
+parser.add_argument('--nms_threshold', default=0.35, type=float, help='nms_threshold')
+parser.add_argument('--keep_top_k', default=400, type=int, help='keep_top_k')
 args = parser.parse_args()
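Note on the retuned defaults: --top_k, --nms_threshold and --keep_top_k are the usual knobs of the detection post-processing in FaceBoxes-style test scripts: keep the top_k highest-scoring candidate boxes, suppress overlaps with NMS at nms_threshold, then cap the output at keep_top_k detections. A minimal self-contained sketch of that flow (the postprocess() helper and the pure-NumPy NMS below are illustrative, not the repository's own nms wrapper):

import numpy as np

def postprocess(boxes, scores, top_k=400, nms_threshold=0.35, keep_top_k=400):
    # Illustrative helper (not the repo's nms()); boxes is an (N, 4) array of
    # [x1, y1, x2, y2], scores an (N,) array of confidences.

    # 1. keep only the top_k highest-scoring boxes before NMS
    order = scores.argsort()[::-1][:top_k]
    boxes, scores = boxes[order], scores[order]

    # 2. greedy IoU-based NMS at nms_threshold
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    keep = []
    idxs = np.arange(len(scores))
    while idxs.size > 0:
        i = idxs[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[idxs[1:]])
        yy1 = np.maximum(y1[i], y1[idxs[1:]])
        xx2 = np.minimum(x2[i], x2[idxs[1:]])
        yy2 = np.minimum(y2[i], y2[idxs[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[idxs[1:]] - inter)
        idxs = idxs[1:][iou <= nms_threshold]

    # 3. cap the final number of detections at keep_top_k
    dets = np.hstack((boxes[keep], scores[keep][:, None]))
    return dets[:keep_top_k]

Under the new defaults this would be called as postprocess(boxes, scores, args.top_k, args.nms_threshold, args.keep_top_k); lowering top_k from 5000 to 400 and keep_top_k from 750 to 400 mainly trims the candidate set before and after NMS.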

@@ -100,9 +100,13 @@ def load_model(model, pretrained_path):
 image_path = testset_folder + img_name + '.jpg'
 img = np.float32(cv2.imread(image_path, cv2.IMREAD_COLOR))
 if resize != 1:
-    img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
-im_height, im_width, _ = img.shape
+    img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
+
 scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
+# if args.dataset == "FDDB":
+#     img = cv2.resize(img, (1024, 1024), interpolation=cv2.INTER_LINEAR)
+
+im_height, im_width, _ = img.shape
 img -= (104, 117, 123)
 img = img.transpose(2, 0, 1)
 img = torch.from_numpy(img).unsqueeze(0)
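Note on the second hunk: im_height, im_width, _ = img.shape now runs after the (still commented-out) FDDB-specific resize, presumably so the recorded dimensions would track that resize if it were ever enabled, while scale keeps its value from before it. Taken together, the test-time preprocessing shown here is: optional resize, per-channel mean subtraction, HWC-to-CHW transpose, and a batch dimension. A self-contained sketch (the preprocess() wrapper is hypothetical; the (104, 117, 123) BGR means are taken from the diff itself):

import cv2
import numpy as np
import torch

def preprocess(image_path, resize=1.0):
    # Hypothetical wrapper mirroring the preprocessing shown in the hunk above.
    img = np.float32(cv2.imread(image_path, cv2.IMREAD_COLOR))  # BGR float32, HxWx3
    if resize != 1:
        img = cv2.resize(img, None, None, fx=resize, fy=resize,
                         interpolation=cv2.INTER_LINEAR)

    # (w, h, w, h) scale used later to map predicted boxes back to pixel coordinates
    scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])

    im_height, im_width, _ = img.shape        # read after any optional resize
    img -= (104, 117, 123)                    # subtract per-channel BGR means
    img = img.transpose(2, 0, 1)              # HWC -> CHW
    img = torch.from_numpy(img).unsqueeze(0)  # add batch dimension: 1x3xHxW
    return img, scale, im_height, im_width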
