• yolov5 optimization: notes on the mosaic augmentation


    Overview

    Mosaic rearranges four images into a single training image. According to the paper, its big advantage is that it enriches the background context of detected objects: the random scaling produces many additional small objects, which improves the network's robustness, and a single BatchNorm step effectively sees the statistics of four images at once. At the same time, there are real downsides. If the dataset already contains many small objects, Mosaic shrinks them further, hurting generalization. And if the key evidence for a class lives in some local part of the box (and in some scenarios you cannot annotate that part separately), the cropping may cut that local region away entirely, so the model can never fit the signal that actually matters.
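    As a minimal standalone illustration of the pasting idea (my own sketch with made-up sizes, not the YOLOv5 code, which follows below), four images are placed into the quadrants of a 2s x 2s canvas around a random center:

    import random
    import numpy as np

    def naive_mosaic(imgs, s=640):
        # imgs: four HxWx3 uint8 arrays, each with longest side <= s
        canvas = np.full((2 * s, 2 * s, 3), 114, dtype=np.uint8)  # gray background
        yc, xc = [random.randint(s // 2, 3 * s // 2) for _ in range(2)]  # random mosaic center
        for i, img in enumerate(imgs):
            h, w = img.shape[:2]
            if i == 0:  # top-left: bottom-right corner of img lands at (xc, yc)
                x1, y1 = max(xc - w, 0), max(yc - h, 0)
                canvas[y1:yc, x1:xc] = img[h - (yc - y1):, w - (xc - x1):]
            elif i == 1:  # top-right
                x2, y1 = min(xc + w, 2 * s), max(yc - h, 0)
                canvas[y1:yc, xc:x2] = img[h - (yc - y1):, :x2 - xc]
            elif i == 2:  # bottom-left
                x1, y2 = max(xc - w, 0), min(yc + h, 2 * s)
                canvas[yc:y2, x1:xc] = img[:y2 - yc, w - (xc - x1):]
            else:  # bottom-right
                x2, y2 = min(xc + w, 2 * s), min(yc + h, 2 * s)
                canvas[yc:y2, xc:x2] = img[:y2 - yc, :x2 - xc]
        return canvas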

    Implementation:

    For the principle and a reference implementation, see 睿智的目标检测28——YoloV4当中的Mosaic数据增强方法_Bubbliiiing的博客-CSDN博客_mosaic方法, which mainly covers YOLOv4. YOLOv5, however, does not reuse that implementation verbatim. In pseudocode, its version works as follows (this article targets v3.1; the code structure changed in v6.1, but the core implementation did not):

    1. For the current image, randomly sample 3 additional images from the dataset. Also allocate a background canvas of twice the model input size per side (four times the area), onto which the four images will be pasted.

    2. Isotropically rescale the current image so that its longest side equals the model's preset input size (e.g. 640), then shift its anchor point randomly by up to +/- 1/2 of the input size in each direction. That 1/2 is hard-coded, and it is entangled with later functions; see the sketch after this list.

    3. Rescale the 3 sampled images the same way (longest side = preset input size) and place them around the random mosaic center: the current image occupies the top-left quadrant, and the other three go top-right, bottom-left, and bottom-right in turn.
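    To make the hard-coded 1/2 in step 2 concrete: it comes from mosaic_border, which the v3.1 dataset class sets to [-img_size // 2, -img_size // 2] and later also hands to random_perspective as border. A standalone rendering of the center sampling:

    import random

    s = img_size = 640
    mosaic_border = [-img_size // 2, -img_size // 2]  # the hard-coded 1/2

    # same expression as in load_mosaic: each coordinate is uniform on [s/2, 3s/2],
    # i.e. the mosaic center jitters by up to +/- s/2 around the canvas midpoint (s, s)
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in mosaic_border]
    print(yc, xc)  # e.g. 412 905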

    At this point the mosaic itself is done. The double-size result is then augmented as a whole by a second pass: HSV jitter, rotation and perspective transform (which also crops the canvas back to the preset model size), mixup, and so on. A condensed sketch of this flow follows.
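    A condensed paraphrase of the v3.1 __getitem__ flow around mosaic (abridged from memory, not verbatim; check datasets.py for the exact code):

    if self.mosaic:
        img, labels = load_mosaic(self, index)  # paste 4 tiles; random_perspective inside
                                                # load_mosaic already crops back to s x s
        if random.random() < self.hyp['mixup']:  # optional mixup with a second mosaic
            img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
            r = np.random.beta(8.0, 8.0)  # mixup ratio
            img = (img * r + img2 * (1 - r)).astype(np.uint8)
            labels = np.concatenate((labels, labels2), 0)

    if self.augment:
        if not self.mosaic:  # the non-mosaic path gets its own random_perspective
            img, labels = random_perspective(img, labels, ...)
        augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])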

    You can see that the generated images look fairly unnatural. This smells like benchmark-chasing on closed academic sets (precision and recall do improve there); in industry, where the data is mostly open-set, a model trained this way tends to have an FPR that is hard to push down (it has clearly learned the wrong cues). So use it with caution in production, or tune the mosaic hyperparameters or the implementation itself to your needs, for example by gating mosaic with a probability as sketched below.
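    One low-effort mitigation is to apply mosaic per sample with a probability instead of always-on (a sketch of my own; v3.1 enables mosaic whenever augment=True and rect=False, while newer yolov5 releases expose a similar 'mosaic' probability hyperparameter):

    # mosaic_prob is our own knob, not a v3.1 hyperparameter
    if self.mosaic and random.random() < self.mosaic_prob:
        img, labels = load_mosaic(self, index)
    else:
        # fall back to the plain letterbox path of __getitem__ (elided here)
        ...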

    The relevant source code:

    1. Configuration file

    yolov5/hyp.finetune.yaml at v3.1 · ultralytics/yolov5 · GitHub
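    An illustrative excerpt of the augmentation-related keys in that file (values here are placeholders to tune per dataset, not the v3.1 defaults):

    hsv_h: 0.015      # HSV hue gain
    hsv_s: 0.7        # HSV saturation gain
    hsv_v: 0.4        # HSV value gain
    degrees: 0.0      # rotation (deg), consumed by random_perspective
    translate: 0.1    # translation fraction
    scale: 0.5        # scale gain
    shear: 0.0        # shear (deg)
    perspective: 0.0  # perspective strength
    mixup: 0.0        # probability of mixup after mosaic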

    2. These two parts of the source should also have their augmentation strength adjusted as needed

    https://github.com/ultralytics/yolov5/blob/v3.1/utils/datasets.py

    The main mosaic implementation in the source:

    def load_mosaic(self, index):
        # loads images in a mosaic
        labels4 = []
        s = self.img_size
        yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
        indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
        for i, index in enumerate(indices):
            # Load image
            img, _, (h, w) = load_image(self, index)

            # place img in img4
            if i == 0:  # top left
                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
            padw = x1a - x1b
            padh = y1a - y1b

            # Labels
            x = self.labels[index]
            labels = x.copy()
            if x.size > 0:  # Normalized xywh to pixel xyxy format
                labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
                labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
                labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
                labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
            labels4.append(labels)

        # Concat/clip labels
        if len(labels4):
            labels4 = np.concatenate(labels4, 0)
            np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_perspective
            # img4, labels4 = replicate(img4, labels4)  # replicate

        # Augment
        img4, labels4 = random_perspective(img4, labels4,
                                           degrees=self.hyp['degrees'],
                                           translate=self.hyp['translate'],
                                           scale=self.hyp['scale'],
                                           shear=self.hyp['shear'],
                                           perspective=self.hyp['perspective'],
                                           border=self.mosaic_border)  # border to remove

        return img4, labels4
    def load_image(self, index):
        # loads 1 image from dataset, returns img, original hw, resized hw
        img = self.imgs[index]
        if img is None:  # not cached
            path = self.img_files[index]
            img = cv2.imread(path)  # BGR
            assert img is not None, 'Image Not Found ' + path
            h0, w0 = img.shape[:2]  # orig hw
            r = self.img_size / max(h0, w0)  # resize image to img_size
            if r != 1:  # always resize down, only resize up if training with augmentation
                interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
                img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
            return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
        else:
            return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized
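    A quick standalone check of this aspect-ratio-preserving resize (made-up 480x1024 input, assuming downscaling without augmentation so INTER_AREA is chosen):

    import cv2
    import numpy as np

    img0 = np.zeros((480, 1024, 3), dtype=np.uint8)
    img_size = 640
    r = img_size / max(img0.shape[:2])  # 640 / 1024 = 0.625
    img = cv2.resize(img0, (int(1024 * r), int(480 * r)), interpolation=cv2.INTER_AREA)
    print(img.shape)  # (300, 640, 3): longest side == 640, aspect ratio preserved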
    def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
        # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
        # targets = [cls, xyxy]
        # upstream v3.1 computes: height = img.shape[0] + border[0] * 2 (border is negative for mosaic)
        if border == (0, 0):  # mosaic not used
            height = img.shape[0]  # shape(h,w,c)
            width = img.shape[1]
        else:  # mosaic used: the canvas was grown to 2x, so shrink back to the original size
            height = img.shape[0] // 2  # shape(h,w,c)
            width = img.shape[1] // 2

        # Center
        C = np.eye(3)
        C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
        C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

        # Perspective
        P = np.eye(3)
        P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
        P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

        # Rotation and Scale
        R = np.eye(3)
        a = random.uniform(-degrees, degrees)
        s = random.uniform(1 - scale, 1 + scale)
        R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

        # Shear
        S = np.eye(3)
        S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
        S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

        # Translation
        T = np.eye(3)
        T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
        T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

        # Combined transformation matrix
        M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
        if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
            if perspective:
                img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
            else:  # affine
                img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

        # Transform label coordinates
        n = len(targets)
        if n:
            # warp the 4 corner points of every box
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            if perspective:
                xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
            else:  # affine
                xy = xy[:, :2].reshape(n, 8)

            # create new axis-aligned boxes from the warped corners
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip boxes to the output canvas
            xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
            xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

            # filter out degenerate boxes
            i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
            targets = targets[i]
            targets[:, 1:5] = xy[i]

        return img, targets
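    random_perspective filters boxes through box_candidates, which is not shown above; for reference, the v3.1 helper is essentially the following (paraphrased, check the repo for the exact thresholds):

    def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):
        # box1: boxes before augmentation, shape (4, n); box2: boxes after, shape (4, n)
        # keep boxes that are still at least wh_thr px wide and high, retain at least
        # area_thr of their original area, and have an aspect ratio below ar_thr
        w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
        w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
        ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio
        return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr)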
    def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
        hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
        dtype = img.dtype  # uint8

        x = np.arange(0, 256, dtype=np.int16)
        lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV hue range is [0, 180)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

        img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
        cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # writes back in place, no return needed

• Original post: https://blog.csdn.net/u012863603/article/details/126393028