 parser = argparse.ArgumentParser(description='FaceBoxes')

 # parser.add_argument('-m', '--trained_model', default='models_2019-04-17-17:59:00/FaceBoxes_epoch_9.pth', type=str)
-parser.add_argument('-m', '--trained_model', default='weights/FaceBoxes_epoch_295.pth', type=str)
+parser.add_argument('-m', '--trained_model', default='weights_r730/FaceBoxes_epoch_150.pth', type=str)
 parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results')
 parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')
 parser.add_argument('--cpu', default=False, type=bool, help='Use cpu nms')
 parser.add_argument('--dataset', default='FDDB', type=str, choices=['AFW', 'PASCAL', 'FDDB'], help='dataset')
 parser.add_argument('--confidence_threshold', default=0.05, type=float, help='confidence_threshold')
-parser.add_argument('--top_k', default=5000, type=int, help='top_k')
-parser.add_argument('--nms_threshold', default=0.3, type=float, help='nms_threshold')
-parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
+parser.add_argument('--top_k', default=400, type=int, help='top_k')
+parser.add_argument('--nms_threshold', default=0.35, type=float, help='nms_threshold')
+parser.add_argument('--keep_top_k', default=400, type=int, help='keep_top_k')
 args = parser.parse_args()

@@ -100,9 +100,13 @@ def load_model(model, pretrained_path): |
     image_path = testset_folder + img_name + '.jpg'
     img = np.float32(cv2.imread(image_path, cv2.IMREAD_COLOR))
     if resize != 1:
-        img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
-    im_height, im_width, _ = img.shape
+        img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
+
     scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
+    # if args.dataset == "FDDB":
+    #     img = cv2.resize(img, (1024, 1024), interpolation=cv2.INTER_LINEAR)
+
+    im_height, im_width, _ = img.shape
     img -= (104, 117, 123)
     img = img.transpose(2, 0, 1)
     img = torch.from_numpy(img).unsqueeze(0)
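For reference, a minimal sketch of the per-image preprocessing order this hunk settles on: the scale tensor is built from the resized image before im_height/im_width are read, with the dataset-specific resize left commented out. The preprocess wrapper and its arguments are illustrative only, not part of the repository.

# Sketch only: mirrors the preprocessing order shown in the hunk above.
# The function name and signature are assumptions for illustration.
import cv2
import numpy as np
import torch

def preprocess(image_path, resize=1.0):
    img = np.float32(cv2.imread(image_path, cv2.IMREAD_COLOR))
    if resize != 1:
        img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)

    # scale is used later to map detections back to pixel coordinates of the (resized) image
    scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])

    im_height, im_width, _ = img.shape
    img -= (104, 117, 123)                    # BGR mean subtraction
    img = img.transpose(2, 0, 1)              # HWC -> CHW
    img = torch.from_numpy(img).unsqueeze(0)  # add batch dimension
    return img, scale, im_height, im_width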