• YOLOV7改进-添加EIOU,SIOU,AlphaIOU,FocalEIOU


    1. 打开utils->general.py
      在这里插入图片描述
      在这里插入图片描述
    2. 找到bbox_iou(),345行左右,将下面的与源码进行替换
    def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, SIoU=False, EIoU=False, WIoU=False, Focal=False, alpha=1, gamma=0.5, scale=False, eps=1e-7):
        """Return the (alpha-)IoU of box1 to box2, or a requested IoU variant.

        box1 is a tensor of 4 coordinates; box2 is nx4 (transposed internally).

        Args:
            x1y1x2y2: boxes are (x1, y1, x2, y2) if True, else (cx, cy, w, h).
            GIoU/DIoU/CIoU/EIoU/SIoU/WIoU: select one IoU-loss variant.
            Focal: if True, return a tuple (iou_term, (inter/union)**gamma) so the
                   caller can apply focal weighting; not supported with WIoU.
            alpha: Alpha-IoU power applied to the IoU and penalty terms (1 = plain IoU).
            gamma: focal exponent for the (inter/union) weighting term.
            scale: enable WIoU dynamic scaling (requires the WIoU_Scale class,
                   defined elsewhere in this module).
            eps: small constant to avoid division by zero.
        """
        box2 = box2.T

        # Get the coordinates of bounding boxes
        if x1y1x2y2:  # x1, y1, x2, y2 = box1
            b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
            b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        else:  # transform from xywh to xyxy
            b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
            b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
            b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
            b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

        # Intersection area
        inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
                (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

        # Union area. NOTE(review): eps is added to the heights only (tuple
        # unpacking), matching upstream YOLO code — kept as-is to preserve numerics.
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
        union = w1 * h1 + w2 * h2 - inter + eps
        if scale:
            # WIoU dynamic scale factor (WIoU_Scale must be defined in this module)
            self = WIoU_Scale(1 - (inter / union))

        # Alpha-IoU; reduces to plain IoU when alpha == 1
        iou = torch.pow(inter / (union + eps), alpha)
        if CIoU or DIoU or GIoU or EIoU or SIoU or WIoU:
            cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1)  # convex (smallest enclosing box) width
            ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1)  # convex height
            if CIoU or DIoU or EIoU or SIoU or WIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
                c2 = (cw ** 2 + ch ** 2) ** alpha + eps  # convex diagonal squared
                rho2 = (((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4) ** alpha  # center dist ** 2
                if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                    v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
                    with torch.no_grad():
                        alpha_ciou = v / (v - iou + (1 + eps))
                    if Focal:
                        return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)), torch.pow(inter / (union + eps), gamma)  # Focal_CIoU
                    return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha))  # CIoU
                elif EIoU:
                    rho_w2 = ((b2_x2 - b2_x1) - (b1_x2 - b1_x1)) ** 2  # width difference squared
                    rho_h2 = ((b2_y2 - b2_y1) - (b1_y2 - b1_y1)) ** 2  # height difference squared
                    cw2 = torch.pow(cw ** 2 + eps, alpha)
                    ch2 = torch.pow(ch ** 2 + eps, alpha)
                    if Focal:
                        return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2), torch.pow(inter / (union + eps), gamma)  # Focal_EIoU
                    return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2)  # EIoU
                elif SIoU:
                    # SIoU Loss https://arxiv.org/pdf/2205.12740.pdf
                    s_cw = (b2_x1 + b2_x2 - b1_x1 - b1_x2) * 0.5 + eps  # center x offset
                    s_ch = (b2_y1 + b2_y2 - b1_y1 - b1_y2) * 0.5 + eps  # center y offset
                    sigma = torch.pow(s_cw ** 2 + s_ch ** 2, 0.5)  # center distance
                    sin_alpha_1 = torch.abs(s_cw) / sigma
                    sin_alpha_2 = torch.abs(s_ch) / sigma
                    threshold = pow(2, 0.5) / 2  # sin(45 degrees)
                    sin_alpha = torch.where(sin_alpha_1 > threshold, sin_alpha_2, sin_alpha_1)
                    angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2)
                    rho_x = (s_cw / cw) ** 2
                    rho_y = (s_ch / ch) ** 2
                    # BUG FIX: the original assigned this to `gamma`, shadowing the
                    # focal-exponent parameter and corrupting the Focal_SIoU weight below.
                    gamma_dist = angle_cost - 2
                    distance_cost = 2 - torch.exp(gamma_dist * rho_x) - torch.exp(gamma_dist * rho_y)
                    omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2)
                    omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2)
                    shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), 4) + torch.pow(1 - torch.exp(-1 * omiga_h), 4)
                    if Focal:
                        return iou - torch.pow(0.5 * (distance_cost + shape_cost) + eps, alpha), torch.pow(inter / (union + eps), gamma)  # Focal_SIoU
                    return iou - torch.pow(0.5 * (distance_cost + shape_cost) + eps, alpha)  # SIoU
                elif WIoU:
                    if Focal:
                        raise RuntimeError("WIoU do not support Focal.")
                    elif scale:
                        return getattr(WIoU_Scale, '_scaled_loss')(self), (1 - iou) * torch.exp((rho2 / c2)), iou  # WIoU https://arxiv.org/abs/2301.10051
                    return iou, torch.exp((rho2 / c2))  # WIoU v1
                if Focal:
                    return iou - rho2 / c2, torch.pow(inter / (union + eps), gamma)  # Focal_DIoU
                return iou - rho2 / c2  # DIoU
            c_area = cw * ch + eps  # convex area
            if Focal:
                return iou - torch.pow((c_area - union) / c_area + eps, alpha), torch.pow(inter / (union + eps), gamma)  # Focal_GIoU https://arxiv.org/pdf/1902.09630.pdf
            return iou - torch.pow((c_area - union) / c_area + eps, alpha)  # GIoU https://arxiv.org/pdf/1902.09630.pdf
        if Focal:
            return iou, torch.pow(inter / (union + eps), gamma)  # Focal_IoU
        return iou  # IoU
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92

    3、打开loss.py,找到ComputeLoss()
    在这里插入图片描述
    在这里插入图片描述

    4、在 loss.py 的 ComputeLoss 中,将原来的 lbox += (1.0 - iou).mean() 这一行注释掉,并在该位置添加以下代码

    # A tuple from bbox_iou() means a Focal variant was used: (iou_term, focal_weight).
    # Weight the box loss by the detached focal factor; otherwise plain 1 - IoU.
    # (Fixed: original snippet mixed a tab with spaces, which raises TabError in loss.py.)
    if isinstance(iou, tuple):
        lbox += (iou[1].detach() * (1 - iou[0])).mean()
        iou = iou[0]
    else:
        lbox += (1.0 - iou).mean()  # iou loss
    
    • 1
    • 2
    • 3
    • 4
    • 5

    在这里插入图片描述

    5、使用对应的 IOU 时,将调用 bbox_iou() 处的 CIoU=True 改为所需变体的参数(如 SIoU=True、EIoU=True 等)
    在这里插入图片描述
    6、使用Focal-iou的话,ok
    在这里插入图片描述
    这里对应调用之前更换的bbox_iou()里的参数

  • 相关阅读:
    探索亚马逊大语言模型:开启人工智能时代的语言创作新篇章
    【Git】常用命令大全
    Qt Widget 删除之后还会显示 问题
    千古第一文人苏轼的众CP
    LeetCode每日一题(2270. Number of Ways to Split Array)
    【控制台】 \xce\xde\xb7\xa8\xb4\xf2\xbf\xaa
    黑马点评关键业务流程梳理
    代码随想录算法训练营第六十一天 | 503.下一个更大元素II、42. 接雨水
    语句覆盖、条件覆盖、判定覆盖、条件-判定覆盖、路径覆盖
    动力电池“退役潮”来袭,宏工科技助力电池回收再利用
  • 原文地址:https://blog.csdn.net/weixin_43722052/article/details/133581479