import numpy as np


def intersect_and_union(pred_label, label, num_classes, ignore_index):
    """Calculate Intersection and Union.

    Args:
        pred_label (ndarray): Prediction segmentation map.
        label (ndarray): Ground truth segmentation map.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.

    Returns:
        ndarray: The intersection of prediction and ground truth histogram
            on all classes.
        ndarray: The union of prediction and ground truth histogram on all
            classes.
        ndarray: The prediction histogram on all classes.
        ndarray: The ground truth histogram on all classes.
    """

    mask = (label != ignore_index)
    pred_label = pred_label[mask]
    label = label[mask]

    # Pixels where prediction matches ground truth; histogramming them per
    # class gives the per-class intersection.
    intersect = pred_label[pred_label == label]
    area_intersect, _ = np.histogram(
        intersect, bins=np.arange(num_classes + 1))
    area_pred_label, _ = np.histogram(
        pred_label, bins=np.arange(num_classes + 1))
    area_label, _ = np.histogram(label, bins=np.arange(num_classes + 1))
    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
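
# Illustrative example (an addition, not part of the original module): a
# hand-checked 2x2 case with two classes and no ignored pixels.
#
#     >>> pred = np.array([[0, 1], [1, 1]])
#     >>> gt = np.array([[0, 1], [0, 1]])
#     >>> intersect_and_union(pred, gt, num_classes=2, ignore_index=255)
#     (array([1, 2]), array([2, 3]), array([1, 3]), array([2, 2]))
#
# Class 0 has 1 correctly predicted pixel out of a union of 2; class 1 has
# 2 out of 3.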


def total_intersect_and_union(results, gt_seg_maps, num_classes, ignore_index):
    """Calculate Total Intersection and Union.

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.

    Returns:
        ndarray: The intersection of prediction and ground truth histogram
            on all classes.
        ndarray: The union of prediction and ground truth histogram on all
            classes.
        ndarray: The prediction histogram on all classes.
        ndarray: The ground truth histogram on all classes.
    """

    num_imgs = len(results)
    assert len(gt_seg_maps) == num_imgs
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `float` (i.e. float64) is the drop-in replacement.
    total_area_intersect = np.zeros((num_classes, ), dtype=float)
    total_area_union = np.zeros((num_classes, ), dtype=float)
    total_area_pred_label = np.zeros((num_classes, ), dtype=float)
    total_area_label = np.zeros((num_classes, ), dtype=float)
    for i in range(num_imgs):
        area_intersect, area_union, area_pred_label, area_label = \
            intersect_and_union(results[i], gt_seg_maps[i], num_classes,
                                ignore_index=ignore_index)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, \
        total_area_pred_label, total_area_label
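
# Illustrative example (an addition, not part of the original module),
# reusing `pred` and `gt` from the example above: aggregating over a list
# of two identical images simply doubles every histogram.
#
#     >>> total_intersect_and_union([pred, pred], [gt, gt], 2, 255)[0]
#     array([2., 4.])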


def mean_iou(results, gt_seg_maps, num_classes, ignore_index, nan_to_num=None):
    """Calculate Mean Intersection and Union (mIoU).

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category IoU, shape (num_classes, ).
    """

    all_acc, acc, iou = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mIoU'],
        nan_to_num=nan_to_num)
    return all_acc, acc, iou
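
# Illustrative example (an addition, not part of the original module),
# continuing the 2x2 case above: 3 of 4 pixels are correct overall, class 0
# is recalled half the time, and the per-class IoUs are 1/2 and 2/3.
#
#     >>> all_acc, acc, iou = mean_iou([pred], [gt], 2, ignore_index=255)
#     >>> all_acc   # 0.75
#     >>> acc       # array([0.5, 1. ])
#     >>> iou       # array([0.5, 0.66666667])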


def mean_dice(results,
              gt_seg_maps,
              num_classes,
              ignore_index,
              nan_to_num=None):
    """Calculate Mean Dice (mDice).

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category dice, shape (num_classes, ).
    """

    all_acc, acc, dice = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mDice'],
        nan_to_num=nan_to_num)
    return all_acc, acc, dice
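
# Illustrative example (an addition, not part of the original module): on
# the same 2x2 case, Dice = 2 * intersection / (pred area + gt area), i.e.
# 2*1/(1+2) = 0.667 for class 0 and 2*2/(3+2) = 0.8 for class 1. Dice is
# never smaller than IoU for the same prediction.
#
#     >>> _, _, dice = mean_dice([pred], [gt], 2, ignore_index=255)
#     >>> dice      # array([0.66666667, 0.8])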


def eval_metrics(results,
                 gt_seg_maps,
                 num_classes,
                 ignore_index,
                 metrics=['mIoU'],
                 nan_to_num=None):
    """Calculate evaluation metrics.

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and
            'mDice'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category evaluation metrics, shape (num_classes, ).
    """

    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))
    total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label = total_intersect_and_union(results, gt_seg_maps,
                                                     num_classes,
                                                     ignore_index=ignore_index)
    # Overall accuracy pools all pixels; per-class accuracy divides each
    # class's correct pixels by that class's ground truth area.
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    acc = total_area_intersect / total_area_label
    ret_metrics = [all_acc, acc]
    for metric in metrics:
        if metric == 'mIoU':
            iou = total_area_intersect / total_area_union
            ret_metrics.append(iou)
        elif metric == 'mDice':
            dice = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            ret_metrics.append(dice)
    # Classes absent from both prediction and ground truth yield 0/0 = NaN;
    # optionally replace NaN with a user-specified number.
    if nan_to_num is not None:
        ret_metrics = [
            np.nan_to_num(metric, nan=nan_to_num) for metric in ret_metrics
        ]
    return ret_metrics
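

# Minimal smoke test (an addition, not part of the original module),
# runnable as a script; the expected values follow the worked 2x2 example
# above.
if __name__ == '__main__':
    pred = np.array([[0, 1], [1, 1]])
    gt = np.array([[0, 1], [0, 1]])
    all_acc, acc, iou = mean_iou([pred], [gt], num_classes=2,
                                 ignore_index=255)
    print('all_acc:', all_acc)  # 0.75
    print('acc:', acc)          # [0.5 1. ]
    print('iou:', iou)          # [0.5        0.66666667]
    _, _, dice = mean_dice([pred], [gt], num_classes=2, ignore_index=255)
    print('dice:', dice)        # [0.66666667 0.8       ]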