• Object Tracking with a Third-Party ByteTrack Implementation


    In the previous post we implemented object tracking with the official ByteTrack. To nitpick, though, the official ByteTrack has a few shortcomings: 1) to meet general-purpose and research requirements, the code is quite long, which makes it awkward to integrate into your own program; 2) the tracking results carry no object-class information, so you have to add it yourself; 3) when the algorithm is run again, the IDs are not reset but continue counting from the previous run. So there is every reason to optimize it.

    There are many optimized versions of ByteTrack on GitHub, so there is no need to optimize it ourselves. Here we choose the following one:

    https://github.com/jahongir7174/ByteTrack/

    We consolidate its core functions into a single file named track.py, put that file in a folder named mytrack, and copy the folder into the current directory. The complete contents of track.py are reproduced at the end of this post.
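    The resulting layout looks like this (main.py stands in for whatever script of yours calls the tracker):

    current directory/
    ├── mytrack/
    │   └── track.py
    └── main.py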

    To use the file, first import it:

    from mytrack import track

    Then instantiate the tracker:

    bytetracker = track.BYTETracker(fps)

    BYTETracker takes a single argument, the frame rate of the video, which defaults to 30.

    Using BYTETracker is also simple:

    tracks = bytetracker.update(boxes, scores, object_classes)

    All three input arguments are the current frame's detection results, as numpy arrays: boxes holds the top-left and bottom-right corner coordinates of the detected boxes, scores holds the confidence values, and object_classes holds the class labels.

    The return value is the tracking result for the frame. In each row, the first four elements are the top-left and bottom-right coordinates of the tracked box (they differ slightly from the input boxes because they come from the Kalman-filtered track state), the fifth element is the track ID, the sixth the score, the seventh the object class, and the eighth the index of the corresponding row in boxes.
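    To make the shapes concrete, here is a minimal sketch of a single call with made-up detections (the box coordinates, scores, and class IDs below are dummy values, not real detector output):

    import numpy as np
    from mytrack import track

    bytetracker = track.BYTETracker(30)

    # Two fabricated detections in (x1, y1, x2, y2) form.
    boxes = np.array([[100., 120., 200., 260.],
                      [300., 80., 380., 200.]])
    scores = np.array([0.9, 0.75])
    object_classes = np.array([2., 7.])  # e.g. COCO: 2 = car, 7 = truck

    tracks = bytetracker.update(boxes, scores, object_classes)
    # Each row: x1, y1, x2, y2, track_id, score, cls, index into boxes
    for x1, y1, x2, y2, track_id, score, cls, idx in tracks:
        print(int(track_id), int(cls), round(float(score), 2), int(idx))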

    As you can see, this third-party ByteTrack is used in much the same way as the official one.

    To use it, only the following packages need to be installed:

    conda install python=3.8
    pip install scipy -i https://pypi.tuna.tsinghua.edu.cn/simple
    conda install -c conda-forge lap

    As you can see, this third-party ByteTrack needs very few packages, and installing them does not run into the usual assortment of baffling problems.
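    As an optional sanity check that the environment is ready:

    python -c "import lap, scipy.linalg, numpy; print('environment OK')"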

    Now we can write the tracking code. Only the part inside if outputs is not None: is shown here; the rest is the same as in the previous post (a sketch of the surrounding loop follows the code below):

    # boxes, confidences and object_classes are per-frame lists that are
    # emptied before this fragment runs (see the loop sketch below)
    for output in outputs:
        x1, y1, x2, y2 = list(map(int, output[:4]))
        boxes.append([x1, y1, x2, y2])
        confidences.append(output[4])
        object_classes.append(output[5])
    tracks = bytetracker.update(np.array(boxes), np.array(confidences), np.array(object_classes))
    if len(tracks) > 0:
        identities = tracks[:, 4]      # track IDs (5th column)
        object_classes = tracks[:, 6]  # class labels (7th column)
        idxs = tracks[:, 7]            # indices back into outputs (8th column)
        for i, identity in enumerate(identities):
            if object_classes[i] == 2:    # COCO class 2: car
                box_label(frame, outputs[int(idxs[i]), :4], '#' + str(int(identity)) + ' car', (167, 146, 11))
            elif object_classes[i] == 5:  # COCO class 5: bus
                box_label(frame, outputs[int(idxs[i]), :4], '#' + str(int(identity)) + ' bus', (186, 55, 2))
            elif object_classes[i] == 7:  # COCO class 7: truck
                box_label(frame, outputs[int(idxs[i]), :4], '#' + str(int(identity)) + ' truck', (19, 222, 24))
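    For completeness, here is a rough sketch of how this fragment sits inside the per-frame loop. It is only an outline under assumptions: detect() stands in for the detector from the previous post (returning an Nx6 array of [x1, y1, x2, y2, conf, cls] per frame, or None), box_label() is the drawing helper used above, and the video path is a placeholder:

    import cv2
    import numpy as np
    from mytrack import track

    def detect(frame):
        # Placeholder for the detector from the previous post; it should
        # return an Nx6 numpy array [x1, y1, x2, y2, conf, cls] or None.
        raise NotImplementedError

    bytetracker = track.BYTETracker(30)
    cap = cv2.VideoCapture('test.mp4')  # placeholder video path
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        outputs = detect(frame)
        boxes, confidences, object_classes = [], [], []
        if outputs is not None:
            # the fragment shown above goes here: collect boxes, confidences
            # and classes, call bytetracker.update(), then draw labelled boxes
            ...
        cv2.imshow('tracking', frame)
        if cv2.waitKey(1) == 27:  # press Esc to quit
            break
    cap.release()
    cv2.destroyAllWindows()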

    Here is the complete content of track.py:

    import lap
    import numpy
    import scipy.linalg  # cho_factor/cho_solve/solve_triangular live in scipy.linalg


    def linear_assignment(cost_matrix, thresh):
        # Linear assignment implemented with lap.lapjv
        if cost_matrix.size == 0:
            matches = numpy.empty((0, 2), dtype=int)
            unmatched_a = tuple(range(cost_matrix.shape[0]))
            unmatched_b = tuple(range(cost_matrix.shape[1]))
            return matches, unmatched_a, unmatched_b
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
        matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
        unmatched_a = numpy.where(x < 0)[0]
        unmatched_b = numpy.where(y < 0)[0]
        return matches, unmatched_a, unmatched_b


    def compute_iou(a_boxes, b_boxes):
        """
        Compute cost based on IoU
        :type a_boxes: list[tlbr] | np.ndarray
        :type b_boxes: list[tlbr] | np.ndarray
        :rtype iou | np.ndarray
        """
        iou = numpy.zeros((len(a_boxes), len(b_boxes)), dtype=numpy.float32)
        if iou.size == 0:
            return iou
        a_boxes = numpy.ascontiguousarray(a_boxes, dtype=numpy.float32)
        b_boxes = numpy.ascontiguousarray(b_boxes, dtype=numpy.float32)
        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = a_boxes.T
        b2_x1, b2_y1, b2_x2, b2_y2 = b_boxes.T
        # Intersection area
        inter_area = (numpy.minimum(b1_x2[:, None], b2_x2) - numpy.maximum(b1_x1[:, None], b2_x1)).clip(0) * \
                     (numpy.minimum(b1_y2[:, None], b2_y2) - numpy.maximum(b1_y1[:, None], b2_y1)).clip(0)
        # Box areas
        box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
        return inter_area / (box2_area + box1_area[:, None] - inter_area + 1E-7)


    def iou_distance(a_tracks, b_tracks):
        """
        Compute cost based on IoU
        :type a_tracks: list[Track]
        :type b_tracks: list[Track]
        :rtype cost_matrix np.ndarray
        """
        if (len(a_tracks) > 0 and isinstance(a_tracks[0], numpy.ndarray)) \
                or (len(b_tracks) > 0 and isinstance(b_tracks[0], numpy.ndarray)):
            a_boxes = a_tracks
            b_boxes = b_tracks
        else:
            a_boxes = [track.tlbr for track in a_tracks]
            b_boxes = [track.tlbr for track in b_tracks]
        return 1 - compute_iou(a_boxes, b_boxes)  # cost matrix


    def fuse_score(cost_matrix, detections):
        if cost_matrix.size == 0:
            return cost_matrix
        iou_sim = 1 - cost_matrix
        det_scores = numpy.array([det.score for det in detections])
        det_scores = numpy.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
        fuse_sim = iou_sim * det_scores
        return 1 - fuse_sim  # fuse_cost


    class KalmanFilterXYAH:
        """
        A Kalman filter for tracking bounding boxes in image space.
        The 8-dimensional state space
            x, y, a, h, vx, vy, va, vh
        contains the bounding box center position (x, y), aspect ratio a, height h,
        and their respective velocities.
        Object motion follows a constant velocity model. The bounding box location
        (x, y, a, h) is taken as direct observation of the state space (linear
        observation model).
        """

        def __init__(self):
            ndim, dt = 4, 1.
            # Create Kalman filter model matrices.
            self._motion_mat = numpy.eye(2 * ndim, 2 * ndim)
            for i in range(ndim):
                self._motion_mat[i, ndim + i] = dt
            self._update_mat = numpy.eye(ndim, 2 * ndim)
            # Motion and observation uncertainty are chosen relative to the current
            # state estimate. These weights control the amount of uncertainty in
            # the model. This is a bit hacky.
            self._std_weight_position = 1. / 20
            self._std_weight_velocity = 1. / 160

        def initiate(self, measurement):
            """
            Create track from unassociated measurement.
            Parameters
            ----------
            measurement : ndarray
                Bounding box coordinates (x, y, a, h) with center position (x, y),
                aspect ratio a, and height h.
            Returns
            -------
            (ndarray, ndarray)
                Returns the mean vector (8 dimensional) and covariance matrix (8x8
                dimensional) of the new track. Unobserved velocities are initialized
                to 0 mean.
            """
            mean_pos = measurement
            mean_vel = numpy.zeros_like(mean_pos)
            mean = numpy.r_[mean_pos, mean_vel]
            std = [2 * self._std_weight_position * measurement[3],
                   2 * self._std_weight_position * measurement[3],
                   1e-2,
                   2 * self._std_weight_position * measurement[3],
                   10 * self._std_weight_velocity * measurement[3],
                   10 * self._std_weight_velocity * measurement[3],
                   1e-5,
                   10 * self._std_weight_velocity * measurement[3]]
            covariance = numpy.diag(numpy.square(std))
            return mean, covariance

        def predict(self, mean, covariance):
            """
            Run Kalman filter prediction step.
            Parameters
            ----------
            mean : ndarray
                The 8 dimensional mean vector of the object state at the previous
                time step.
            covariance : ndarray
                The 8x8 dimensional covariance matrix of the object state at the
                previous time step.
            Returns
            -------
            (ndarray, ndarray)
                Returns the mean vector and covariance matrix of the predicted
                state. Unobserved velocities are initialized to 0 mean.
            """
            std_pos = [self._std_weight_position * mean[3],
                       self._std_weight_position * mean[3],
                       1e-2,
                       self._std_weight_position * mean[3]]
            std_vel = [self._std_weight_velocity * mean[3],
                       self._std_weight_velocity * mean[3],
                       1e-5,
                       self._std_weight_velocity * mean[3]]
            motion_cov = numpy.diag(numpy.square(numpy.r_[std_pos, std_vel]))
            mean = numpy.dot(mean, self._motion_mat.T)
            covariance = numpy.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
            return mean, covariance

        def project(self, mean, covariance):
            """
            Project state distribution to measurement space.
            Parameters
            ----------
            mean : ndarray
                The state's mean vector (8 dimensional array).
            covariance : ndarray
                The state's covariance matrix (8x8 dimensional).
            Returns
            -------
            (ndarray, ndarray)
                Returns the projected mean and covariance matrix of the given state
                estimate.
            """
            std = [self._std_weight_position * mean[3],
                   self._std_weight_position * mean[3],
                   1e-1,
                   self._std_weight_position * mean[3]]
            innovation_cov = numpy.diag(numpy.square(std))
            mean = numpy.dot(self._update_mat, mean)
            covariance = numpy.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
            return mean, covariance + innovation_cov

        def multi_predict(self, mean, covariance):
            """
            Run Kalman filter prediction step (Vectorized version).
            Parameters
            ----------
            mean : ndarray
                The Nx8 dimensional mean matrix of the object states at the previous
                time step.
            covariance : ndarray
                The Nx8x8 dimensional covariance matrix of the object states at the
                previous time step.
            Returns
            -------
            (ndarray, ndarray)
                Returns the mean vector and covariance matrix of the predicted
                state. Unobserved velocities are initialized to 0 mean.
            """
            std_pos = [self._std_weight_position * mean[:, 3],
                       self._std_weight_position * mean[:, 3],
                       1e-2 * numpy.ones_like(mean[:, 3]),
                       self._std_weight_position * mean[:, 3]]
            std_vel = [self._std_weight_velocity * mean[:, 3],
                       self._std_weight_velocity * mean[:, 3],
                       1e-5 * numpy.ones_like(mean[:, 3]),
                       self._std_weight_velocity * mean[:, 3]]
            sqr = numpy.square(numpy.r_[std_pos, std_vel]).T
            motion_cov = [numpy.diag(sqr[i]) for i in range(len(mean))]
            motion_cov = numpy.asarray(motion_cov)
            mean = numpy.dot(mean, self._motion_mat.T)
            left = numpy.dot(self._motion_mat, covariance).transpose((1, 0, 2))
            covariance = numpy.dot(left, self._motion_mat.T) + motion_cov
            return mean, covariance

        def update(self, mean, covariance, measurement):
            """
            Run Kalman filter correction step.
            Parameters
            ----------
            mean : ndarray
                The predicted state's mean vector (8 dimensional).
            covariance : ndarray
                The state's covariance matrix (8x8 dimensional).
            measurement : ndarray
                The 4 dimensional measurement vector (x, y, a, h), where (x, y)
                is the center position, a the aspect ratio, and h the height of the
                bounding box.
            Returns
            -------
            (ndarray, ndarray)
                Returns the measurement-corrected state distribution.
            """
            projected_mean, projected_cov = self.project(mean, covariance)
            chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
            kalman_gain = scipy.linalg.cho_solve((chol_factor, lower),
                                                 numpy.dot(covariance, self._update_mat.T).T,
                                                 check_finite=False).T
            innovation = measurement - projected_mean
            new_mean = mean + numpy.dot(innovation, kalman_gain.T)
            new_covariance = covariance - numpy.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
            return new_mean, new_covariance

        def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'):
            """
            Compute gating distance between state distribution and measurements.
            A suitable distance threshold can be obtained from `chi2inv95`. If
            `only_position` is False, the chi-square distribution has 4 degrees of
            freedom, otherwise 2.
            Parameters
            ----------
            mean : ndarray
                Mean vector over the state distribution (8 dimensional).
            covariance : ndarray
                Covariance of the state distribution (8x8 dimensional).
            measurements : ndarray
                An Nx4 dimensional matrix of N measurements, each in
                format (x, y, a, h) where (x, y) is the bounding box center
                position, a the aspect ratio, and h the height.
            only_position : Optional[bool]
                If True, distance computation is done with respect to the bounding
                box center position only.
            metric : str
                Distance metric.
            Returns
            -------
            ndarray
                Returns an array of length N, where the i-th element contains the
                squared Mahalanobis distance between (mean, covariance) and
                `measurements[i]`.
            """
            mean, covariance = self.project(mean, covariance)
            if only_position:
                mean, covariance = mean[:2], covariance[:2, :2]
                measurements = measurements[:, :2]
            d = measurements - mean
            if metric == 'gaussian':
                return numpy.sum(d * d, axis=1)
            elif metric == 'maha':
                factor = numpy.linalg.cholesky(covariance)
                z = scipy.linalg.solve_triangular(factor, d.T, lower=True, check_finite=False, overwrite_b=True)
                return numpy.sum(z * z, axis=0)  # square maha
            else:
                raise ValueError('invalid distance metric')


    class State:
        New = 0
        Tracked = 1
        Lost = 2
        Removed = 3


    class Track:
        count = 0
        shared_kalman = KalmanFilterXYAH()

        def __init__(self, tlwh, score, cls):
            # Note: despite its name, tlwh receives [x1, y1, x2, y2, idx],
            # i.e. a tlbr box plus the detection index appended by the tracker.
            # The track waits to be activated.
            self._tlwh = numpy.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=numpy.float32)
            self.kalman_filter = None
            self.mean, self.covariance = None, None
            self.is_activated = False
            self.state = State.New  # start every track in the New state
            self.score = score
            self.tracklet_len = 0
            self.cls = cls
            self.idx = tlwh[-1]

        def predict(self):
            mean_state = self.mean.copy()
            if self.state != State.Tracked:
                mean_state[7] = 0
            self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

        @staticmethod
        def multi_predict(tracks):
            if len(tracks) <= 0:
                return
            multi_mean = numpy.asarray([st.mean.copy() for st in tracks])
            multi_covariance = numpy.asarray([st.covariance for st in tracks])
            for i, st in enumerate(tracks):
                if st.state != State.Tracked:
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = Track.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                tracks[i].mean = mean
                tracks[i].covariance = cov

        def activate(self, kalman_filter, frame_id):
            """Start a new tracklet"""
            self.kalman_filter = kalman_filter
            self.track_id = self.next_id()
            self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))
            self.tracklet_len = 0
            self.state = State.Tracked
            if frame_id == 1:
                self.is_activated = True
            self.frame_id = frame_id
            self.start_frame = frame_id

        def re_activate(self, new_track, frame_id, new_id=False):
            self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,
                                                                   self.convert_coords(new_track.tlwh))
            self.tracklet_len = 0
            self.state = State.Tracked
            self.is_activated = True
            self.frame_id = frame_id
            if new_id:
                self.track_id = self.next_id()
            self.score = new_track.score
            self.cls = new_track.cls
            self.idx = new_track.idx

        def update(self, new_track, frame_id):
            """
            Update a matched track
            :type new_track: Track
            :type frame_id: int
            :return:
            """
            self.frame_id = frame_id
            self.tracklet_len += 1
            new_tlwh = new_track.tlwh
            self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,
                                                                   self.convert_coords(new_tlwh))
            self.state = State.Tracked
            self.is_activated = True
            self.score = new_track.score
            self.cls = new_track.cls
            self.idx = new_track.idx

        def convert_coords(self, tlwh):
            return self.tlwh_to_xyah(tlwh)

        def mark_lost(self):
            self.state = State.Lost

        def mark_removed(self):
            self.state = State.Removed

        @property
        def end_frame(self):
            return self.frame_id

        @staticmethod
        def next_id():
            Track.count += 1
            return Track.count

        @property
        def tlwh(self):
            """Get current position in bounding box format `(top left x, top left y,
            width, height)`.
            """
            if self.mean is None:
                return self._tlwh.copy()
            ret = self.mean[:4].copy()
            ret[2] *= ret[3]
            ret[:2] -= ret[2:] / 2
            return ret

        @property
        def tlbr(self):
            """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
            `(top left, bottom right)`.
            """
            ret = self.tlwh.copy()
            ret[2:] += ret[:2]
            return ret

        @staticmethod
        def reset_id():
            Track.count = 0

        @staticmethod
        def tlwh_to_xyah(tlwh):
            """Convert bounding box to format `(center x, center y, aspect ratio,
            height)`, where the aspect ratio is `width / height`.
            """
            ret = numpy.asarray(tlwh).copy()
            ret[:2] += ret[2:] / 2
            ret[2] /= ret[3]
            return ret

        @staticmethod
        def tlbr_to_tlwh(tlbr):
            ret = numpy.asarray(tlbr).copy()
            ret[2:] -= ret[:2]
            return ret

        @staticmethod
        def tlwh_to_tlbr(tlwh):
            ret = numpy.asarray(tlwh).copy()
            ret[2:] += ret[:2]
            return ret

        def __repr__(self):
            return f'OT_{self.track_id}_({self.start_frame}-{self.end_frame})'


    class BYTETracker:
        def __init__(self, frame_rate=30):
            self.tracked_tracks = []
            self.lost_tracks = []
            self.removed_tracks = []
            self.frame_id = 0
            self.max_time_lost = int(frame_rate)
            self.kalman_filter = KalmanFilterXYAH()
            self.reset_id()

        def update(self, boxes, scores, object_classes):
            self.frame_id += 1
            activated_tracks = []
            re_find_tracks = []
            lost_tracks = []
            removed_tracks = []
            # Append each detection's index so it can be reported in the output
            boxes = numpy.concatenate([boxes, numpy.arange(len(boxes)).reshape(-1, 1)], axis=-1)
            # Split detections into a high-score set (first association)
            # and a low-score set (second association)
            indices_low = scores > 0.1
            indices_high = scores < 0.5
            indices_remain = scores > 0.5
            indices_second = numpy.logical_and(indices_low, indices_high)
            boxes_second = boxes[indices_second]
            boxes = boxes[indices_remain]
            scores_keep = scores[indices_remain]
            scores_second = scores[indices_second]
            cls_keep = object_classes[indices_remain]
            cls_second = object_classes[indices_second]
            detections = self.init_track(boxes, scores_keep, cls_keep)
            # Add newly detected tracklets to tracked_stracks
            unconfirmed = []
            tracked_stracks = []
            for track in self.tracked_tracks:
                if not track.is_activated:
                    unconfirmed.append(track)
                else:
                    tracked_stracks.append(track)
            # Step 2: First association, with high score detection boxes
            track_pool = self.joint_stracks(tracked_stracks, self.lost_tracks)
            # Predict the current location with KF
            self.multi_predict(track_pool)
            dists = self.get_dists(track_pool, detections)
            matches, u_track, u_detection = linear_assignment(dists, thresh=0.8)
            for tracked_i, box_i in matches:
                track = track_pool[tracked_i]
                det = detections[box_i]
                if track.state == State.Tracked:
                    track.update(det, self.frame_id)
                    activated_tracks.append(track)
                else:
                    track.re_activate(det, self.frame_id, new_id=False)
                    re_find_tracks.append(track)
            # Step 3: Second association, with low score detection boxes
            # Associate the unmatched tracks to the low score detections
            detections_second = self.init_track(boxes_second, scores_second, cls_second)
            r_tracked_tracks = [track_pool[i] for i in u_track if track_pool[i].state == State.Tracked]
            dists = iou_distance(r_tracked_tracks, detections_second)
            matches, u_track, u_detection_second = linear_assignment(dists, thresh=0.5)
            for tracked_i, box_i in matches:
                track = r_tracked_tracks[tracked_i]
                det = detections_second[box_i]
                if track.state == State.Tracked:
                    track.update(det, self.frame_id)
                    activated_tracks.append(track)
                else:
                    track.re_activate(det, self.frame_id, new_id=False)
                    re_find_tracks.append(track)
            for it in u_track:
                track = r_tracked_tracks[it]
                if track.state != State.Lost:
                    track.mark_lost()
                    lost_tracks.append(track)
            # Deal with unconfirmed tracks, usually tracks with only one beginning frame
            detections = [detections[i] for i in u_detection]
            dists = self.get_dists(unconfirmed, detections)
            matches, u_unconfirmed, u_detection = linear_assignment(dists, thresh=0.7)
            for tracked_i, box_i in matches:
                unconfirmed[tracked_i].update(detections[box_i], self.frame_id)
                activated_tracks.append(unconfirmed[tracked_i])
            for it in u_unconfirmed:
                track = unconfirmed[it]
                track.mark_removed()
                removed_tracks.append(track)
            # Step 4: Init new stracks
            for new_i in u_detection:
                track = detections[new_i]
                if track.score < 0.6:
                    continue
                track.activate(self.kalman_filter, self.frame_id)
                activated_tracks.append(track)
            # Step 5: Update state
            for track in self.lost_tracks:
                if self.frame_id - track.end_frame > self.max_time_lost:
                    track.mark_removed()
                    removed_tracks.append(track)
            self.tracked_tracks = [t for t in self.tracked_tracks if t.state == State.Tracked]
            self.tracked_tracks = self.joint_stracks(self.tracked_tracks, activated_tracks)
            self.tracked_tracks = self.joint_stracks(self.tracked_tracks, re_find_tracks)
            self.lost_tracks = self.sub_stracks(self.lost_tracks, self.tracked_tracks)
            self.lost_tracks.extend(lost_tracks)
            self.lost_tracks = self.sub_stracks(self.lost_tracks, self.removed_tracks)
            self.removed_tracks.extend(removed_tracks)
            self.tracked_tracks, self.lost_tracks = self.remove_duplicate_stracks(self.tracked_tracks, self.lost_tracks)
            output = [track.tlbr.tolist() + [track.track_id,
                                             track.score,
                                             track.cls,
                                             track.idx] for track in self.tracked_tracks if track.is_activated]
            return numpy.asarray(output, dtype=numpy.float32)

        @staticmethod
        def init_track(boxes, scores, cls):
            return [Track(box, s, c) for (box, s, c) in zip(boxes, scores, cls)] if len(boxes) else []  # detections

        @staticmethod
        def get_dists(tracks, detections):
            dists = iou_distance(tracks, detections)
            dists = fuse_score(dists, detections)
            return dists

        @staticmethod
        def multi_predict(tracks):
            Track.multi_predict(tracks)

        @staticmethod
        def reset_id():
            Track.reset_id()

        @staticmethod
        def joint_stracks(tlista, tlistb):
            exists = {}
            res = []
            for t in tlista:
                exists[t.track_id] = 1
                res.append(t)
            for t in tlistb:
                tid = t.track_id
                if not exists.get(tid, 0):
                    exists[tid] = 1
                    res.append(t)
            return res

        @staticmethod
        def sub_stracks(tlista, tlistb):
            stracks = {t.track_id: t for t in tlista}
            for t in tlistb:
                tid = t.track_id
                if stracks.get(tid, 0):
                    del stracks[tid]
            return list(stracks.values())

        @staticmethod
        def remove_duplicate_stracks(stracksa, stracksb):
            pdist = iou_distance(stracksa, stracksb)
            pairs = numpy.where(pdist < 0.15)
            dupa, dupb = [], []
            for p, q in zip(*pairs):
                timep = stracksa[p].frame_id - stracksa[p].start_frame
                timeq = stracksb[q].frame_id - stracksb[q].start_frame
                if timep > timeq:
                    dupb.append(q)
                else:
                    dupa.append(p)
            resa = [t for i, t in enumerate(stracksa) if i not in dupa]
            resb = [t for i, t in enumerate(stracksb) if i not in dupb]
            return resa, resb
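
    One detail worth pointing out: BYTETracker.__init__ calls reset_id(), which sets Track.count back to 0. This is what fixes shortcoming 3 from the beginning of the post; creating a fresh tracker restarts the IDs from 1. Note that the counter is a class attribute shared by all tracker instances. A tiny check with a dummy detection:

    import numpy as np
    from mytrack import track

    det = (np.array([[10., 10., 50., 50.]]), np.array([0.9]), np.array([0.]))

    t1 = track.BYTETracker(30)
    print(t1.update(*det)[0][4])  # 1.0: first track ID of the first run

    t2 = track.BYTETracker(30)    # a fresh tracker resets the shared counter
    print(t2.update(*det)[0][4])  # 1.0 again, not 2.0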

  • Original post: https://blog.csdn.net/zhaocj/article/details/133612221