• 【YOLOV5】YOLOV5添加OTA


    当前YOLOV5版本为7.0

    第一步  在utils/loss.py添加ComputeLossOTA

    1. import torch.nn.functional as F
    2. from utils.metrics import box_iou
    3. from utils.torch_utils import de_parallel
    4. from utils.general import xywh2xyxy
    class ComputeLossOTA:
        """YOLOv5 detection loss with OTA (Optimal Transport Assignment) label assignment.

        Replaces the fixed anchor-target matching of ComputeLoss with SimOTA-style
        dynamic-k assignment (as in YOLOX / YOLOv7): for every image, candidate
        anchors from all pyramid levels compete for each ground-truth box via a
        cost combining classification loss and IoU loss.

        Requires from the enclosing module: smooth_BCE, FocalLoss, bbox_iou
        (utils/loss.py, utils/metrics.py), box_iou, de_parallel, xywh2xyxy.
        """

        def __init__(self, model, autobalance=False):
            """Build criteria and cache Detect() attributes from the (possibly DDP-wrapped) model."""
            super(ComputeLossOTA, self).__init__()
            device = next(model.parameters()).device  # get model device
            h = model.hyp  # hyperparameters

            # Define criteria (pos_weight tensors created directly on the model device)
            BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
            BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

            # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
            self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

            # Focal loss (only wraps the BCE criteria when fl_gamma > 0)
            g = h['fl_gamma']  # focal loss gamma
            if g > 0:
                BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

            det = de_parallel(model).model[-1]  # Detect() module
            self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7 obj-loss weights
            self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
            self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
            # Copy anchor/layer metadata from the Detect head
            for k in 'na', 'nc', 'nl', 'anchors', 'stride':
                setattr(self, k, getattr(det, k))

        def __call__(self, p, targets, imgs):  # predictions, targets, images
            """Compute total loss.

            Args:
                p: list of per-level predictions, each (bs, na, ny, nx, 5 + nc).
                targets: (n, 6) tensor of [image_idx, class, x, y, w, h] (normalized).
                imgs: batch of input images; only the spatial size is used by
                    build_targets to scale normalized boxes to pixels.

            Returns:
                (loss * batch_size, detached tensor [lbox, lobj, lcls]).
            """
            device = targets.device
            lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
            bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
            # Per-level (w, h, w, h) gains to map normalized target boxes to grid units
            pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]

            # Losses
            for i, pi in enumerate(p):  # layer index, layer predictions
                b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
                tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

                n = b.shape[0]  # number of targets
                if n:
                    ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                    # Regression: decode predicted box in grid units (YOLOv5 head parameterization)
                    grid = torch.stack([gi, gj], dim=1)
                    pxy = ps[:, :2].sigmoid() * 2. - 0.5
                    # pxy = ps[:, :2].sigmoid() * 3. - 1.
                    pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                    pbox = torch.cat((pxy, pwh), 1)  # predicted box
                    selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
                    selected_tbox[:, :2] -= grid  # target xy relative to assigned cell
                    iou = bbox_iou(pbox, selected_tbox, CIoU=True)  # iou(prediction, target)
                    if type(iou) is tuple:
                        # Some bbox_iou variants return (iou, extra_term); use both
                        lbox += (iou[1].detach() * (1 - iou[0])).mean()
                        iou = iou[0]
                    else:
                        lbox += (1.0 - iou).mean()  # iou loss

                    # Objectness: target is the (clamped, detached) IoU, blended by self.gr
                    tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype).squeeze()  # iou ratio

                    # Classification
                    selected_tcls = targets[i][:, 1].long()
                    if self.nc > 1:  # cls loss (only if multiple classes)
                        t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                        t[range(n), selected_tcls] = self.cp
                        lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                    # Append targets to text file
                    # with open('targets.txt', 'a') as file:
                    #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

                obji = self.BCEobj(pi[..., 4], tobj)
                lobj += obji * self.balance[i]  # obj loss
                if self.autobalance:
                    self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

            if self.autobalance:
                self.balance = [x / self.balance[self.ssi] for x in self.balance]
            lbox *= self.hyp['box']
            lobj *= self.hyp['obj']
            lcls *= self.hyp['cls']
            bs = tobj.shape[0]  # batch size

            loss = lbox + lobj + lcls
            return loss * bs, torch.cat((lbox, lobj, lcls)).detach()

        def build_targets(self, p, targets, imgs):
            """SimOTA dynamic assignment: refine find_3_positive candidates per image.

            For each image, gathers candidate predictions from all levels, scores
            each (gt, candidate) pair with cls + 3*IoU cost, picks dynamic-k
            candidates per gt (k from summed top-10 IoUs), and resolves candidates
            claimed by multiple gts by minimum cost.

            Returns per-level lists: (image idx, anchor idx, grid j, grid i,
            matched targets, matched anchors).
            """
            indices, anch = self.find_3_positive(p, targets)
            device = torch.device(targets.device)
            matching_bs = [[] for pp in p]
            matching_as = [[] for pp in p]
            matching_gjs = [[] for pp in p]
            matching_gis = [[] for pp in p]
            matching_targets = [[] for pp in p]
            matching_anchs = [[] for pp in p]

            nl = len(p)

            for batch_idx in range(p[0].shape[0]):
                b_idx = targets[:, 0] == batch_idx
                this_target = targets[b_idx]
                if this_target.shape[0] == 0:
                    continue

                # NOTE(review): scales by shape[1] only — assumes square input images
                txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
                txyxy = xywh2xyxy(txywh)

                pxyxys = []
                p_cls = []
                p_obj = []
                from_which_layer = []
                all_b = []
                all_a = []
                all_gj = []
                all_gi = []
                all_anch = []

                # Gather this image's candidate predictions from every pyramid level
                for i, pi in enumerate(p):
                    b, a, gj, gi = indices[i]
                    idx = (b == batch_idx)
                    b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                    all_b.append(b)
                    all_a.append(a)
                    all_gj.append(gj)
                    all_gi.append(gi)
                    all_anch.append(anch[i][idx])
                    from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device))

                    fg_pred = pi[b, a, gj, gi]
                    p_obj.append(fg_pred[:, 4:5])
                    p_cls.append(fg_pred[:, 5:])

                    # Decode candidate boxes to input-image pixel scale
                    grid = torch.stack([gi, gj], dim=1)
                    pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
                    # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
                    pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
                    pxywh = torch.cat([pxy, pwh], dim=-1)
                    pxyxy = xywh2xyxy(pxywh)
                    pxyxys.append(pxyxy)

                pxyxys = torch.cat(pxyxys, dim=0)
                if pxyxys.shape[0] == 0:
                    continue
                p_obj = torch.cat(p_obj, dim=0)
                p_cls = torch.cat(p_cls, dim=0)
                from_which_layer = torch.cat(from_which_layer, dim=0)
                all_b = torch.cat(all_b, dim=0)
                all_a = torch.cat(all_a, dim=0)
                all_gj = torch.cat(all_gj, dim=0)
                all_gi = torch.cat(all_gi, dim=0)
                all_anch = torch.cat(all_anch, dim=0)

                pair_wise_iou = box_iou(txyxy, pxyxys)
                pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

                # dynamic-k per gt: sum of its top-10 IoUs (at least 1)
                top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
                dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

                gt_cls_per_image = (
                    F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                    .float()
                    .unsqueeze(1)
                    .repeat(1, pxyxys.shape[0], 1)
                )

                num_gt = this_target.shape[0]
                cls_preds_ = (
                    p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                    * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                )

                # Re-derive logits from joint cls*obj probability, then BCE-with-logits
                y = cls_preds_.sqrt_()
                pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                    torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
                ).sum(-1)
                del cls_preds_

                cost = (
                    pair_wise_cls_loss
                    + 3.0 * pair_wise_iou_loss
                )

                matching_matrix = torch.zeros_like(cost, device=device)

                for gt_idx in range(num_gt):
                    _, pos_idx = torch.topk(
                        cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                    )
                    matching_matrix[gt_idx][pos_idx] = 1.0

                del top_k, dynamic_ks

                # A candidate claimed by several gts keeps only its min-cost gt
                anchor_matching_gt = matching_matrix.sum(0)
                if (anchor_matching_gt > 1).sum() > 0:
                    _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                    matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                    matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
                fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device)
                matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

                from_which_layer = from_which_layer[fg_mask_inboxes]
                all_b = all_b[fg_mask_inboxes]
                all_a = all_a[fg_mask_inboxes]
                all_gj = all_gj[fg_mask_inboxes]
                all_gi = all_gi[fg_mask_inboxes]
                all_anch = all_anch[fg_mask_inboxes]

                this_target = this_target[matched_gt_inds]

                # Scatter this image's matches back into per-level buckets
                for i in range(nl):
                    layer_idx = from_which_layer == i
                    matching_bs[i].append(all_b[layer_idx])
                    matching_as[i].append(all_a[layer_idx])
                    matching_gjs[i].append(all_gj[layer_idx])
                    matching_gis[i].append(all_gi[layer_idx])
                    matching_targets[i].append(this_target[layer_idx])
                    matching_anchs[i].append(all_anch[layer_idx])

            for i in range(nl):
                if matching_targets[i] != []:
                    matching_bs[i] = torch.cat(matching_bs[i], dim=0)
                    matching_as[i] = torch.cat(matching_as[i], dim=0)
                    matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
                    matching_gis[i] = torch.cat(matching_gis[i], dim=0)
                    matching_targets[i] = torch.cat(matching_targets[i], dim=0)
                    matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
                else:
                    # BUGFIX: was hardcoded device='cuda:0', which crashes on CPU or
                    # any other GPU; use the targets' device computed above instead.
                    matching_bs[i] = torch.tensor([], device=device, dtype=torch.int64)
                    matching_as[i] = torch.tensor([], device=device, dtype=torch.int64)
                    matching_gjs[i] = torch.tensor([], device=device, dtype=torch.int64)
                    matching_gis[i] = torch.tensor([], device=device, dtype=torch.int64)
                    matching_targets[i] = torch.tensor([], device=device, dtype=torch.int64)
                    matching_anchs[i] = torch.tensor([], device=device, dtype=torch.int64)

            return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs

        def find_3_positive(self, p, targets):
            """Coarse candidate selection (YOLOv5-style): for each target, the anchors
            passing the wh-ratio test in its own cell plus up to two neighbouring cells.

            Build targets for compute_loss(), input targets(image,class,x,y,w,h).
            Returns (indices, anch): per-level (b, a, gj, gi) index tuples and anchors.
            """
            na, nt = self.na, targets.shape[0]  # number of anchors, targets
            indices, anch = [], []
            gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
            ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
            targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

            g = 0.5  # bias
            off = torch.tensor([[0, 0],
                                [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                                # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                                ], device=targets.device).float() * g  # offsets

            for i in range(self.nl):
                anchors = self.anchors[i]
                gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

                # Match targets to anchors
                t = targets * gain
                if nt:
                    # Matches: keep anchors whose wh ratio to the target is within anchor_t
                    r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                    j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                    # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                    t = t[j]  # filter

                    # Offsets: also take the two nearest neighbouring cells
                    gxy = t[:, 2:4]  # grid xy
                    gxi = gain[[2, 3]] - gxy  # inverse
                    j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                    l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                    j = torch.stack((torch.ones_like(j), j, k, l, m))
                    t = t.repeat((5, 1, 1))[j]
                    offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
                else:
                    t = targets[0]
                    offsets = 0

                # Define
                b, c = t[:, :2].long().T  # image, class
                gxy = t[:, 2:4]  # grid xy
                gwh = t[:, 4:6]  # grid wh
                gij = (gxy - offsets).long()
                gi, gj = gij.T  # grid xy indices

                # Append
                a = t[:, 6].long()  # anchor indices
                indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
                anch.append(anchors[a])  # anchors

            return indices, anch

    第二步 修改train.py

    1. # 1. 导入ComputeLossOTA
    2. from utils.loss import ComputeLossOTA
    3. # 2. 修改损失函数初始化
    4. compute_loss = ComputeLossOTA(model)
    5. # 3. 修改损失函数调用
    6. loss, loss_items = compute_loss(pred, targets.to(device), imgs)

    第三步 修改val.py

    1. # 1. 修改损失函数调用
    2. loss += compute_loss(train_out, targets, im)[1] # box, obj, cls

  • 相关阅读:
    PNG怎么转成PDF格式?这两种方法一定要尝试一下
    大二学生JavaScript实训大作业——动漫秦时明月7页 期末网页制作 HTML+CSS+JavaScript 网页设计实例 企业网站制作
    虚拟机桥接模式连接失败解决方案
    Swift 另辟蹊径极速生成图片的缩略图
    【Maven】基础
    ZYNQ双核启动和固化步骤
    如何在WIndows虚拟机安装 macOS 黑苹果系统?
    mac m1 docker安装nacos
    JMeter 面试题及答案整理,最新面试题
    基于stm32单片机DHT11温湿度测量自动加湿器散热器Proteus仿真
  • 原文地址:https://blog.csdn.net/wxd1233/article/details/132895527