YOLOX Training with a COCO-Format Dataset


Contents

1. Prepare a COCO-format dataset
2. Modifications
   (1) Modify coco_classes
   (2) Modify the paths and num_classes
   (3) Rewrite the dataset
3. Training
   (1) Resuming a previous training run
   (2) Visualizing the loss with TensorBoard
   (3) OSError: [WinError 1455] The paging file is too small for this operation to complete

1. Prepare a COCO-format dataset

[Figure: COCO dataset directory structure]

A quick introduction to the COCO dataset format:
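A COCO detection annotation file is a single JSON object built around three lists: images, annotations, and categories. A minimal sketch with made-up values (bbox is [x, y, width, height] in pixels, measured from the top-left corner):

# Minimal COCO-style annotation structure (illustrative values only).
coco_ann = {
    "images": [
        # one entry per image
        {"id": 1, "file_name": "000001.jpg", "height": 1080, "width": 1920}
    ],
    "annotations": [
        # one entry per object instance
        {"id": 1, "image_id": 1, "category_id": 1,
         "bbox": [100.0, 200.0, 50.0, 120.0],  # [x, y, w, h] from the top-left corner
         "area": 6000.0,                       # w * h
         "iscrowd": 0}
    ],
    "categories": [
        {"id": 1, "name": "pedestrian"}
    ]
}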

Script to convert the MOT17 dataset to COCO format (it works for MOT16 as well):

import os
import json

import cv2
import numpy as np

# Use the same script for MOT16.
DATA_PATH = 'E:/wode/code/YOLOX_deepsort_tracker-master/datasets/mot'
OUT_PATH = os.path.join(DATA_PATH, 'annotations')
SPLITS = ['train_half', 'val_half', 'train', 'test']  # --> split training data into train_half and val_half.
HALF_VIDEO = True
CREATE_SPLITTED_ANN = True
CREATE_SPLITTED_DET = True

if __name__ == '__main__':
    if not os.path.exists(OUT_PATH):
        os.makedirs(OUT_PATH)
    for split in SPLITS:
        if split == 'test':
            data_path = os.path.join(DATA_PATH, 'test')
        else:
            data_path = os.path.join(DATA_PATH, 'train')
        out_path = os.path.join(OUT_PATH, '{}.json'.format(split))
        out = {'images': [], 'annotations': [], 'videos': [],
               'categories': [{'id': 1, 'name': 'pedestrian'}]}
        seqs = os.listdir(data_path)
        image_cnt = 0
        ann_cnt = 0
        video_cnt = 0
        tid_curr = 0
        tid_last = -1
        for seq in sorted(seqs):
            if '.DS_Store' in seq:
                continue
            if 'mot' in DATA_PATH and (split != 'test' and not ('FRCNN' in seq)):
                continue
            video_cnt += 1  # video sequence number.
            out['videos'].append({'id': video_cnt, 'file_name': seq})
            seq_path = os.path.join(data_path, seq)
            img_path = os.path.join(seq_path, 'img1')
            ann_path = os.path.join(seq_path, 'gt/gt.txt')
            images = os.listdir(img_path)
            num_images = len([image for image in images if 'jpg' in image])  # half and half
            if HALF_VIDEO and ('half' in split):
                image_range = [0, num_images // 2] if 'train' in split else \
                              [num_images // 2 + 1, num_images - 1]
            else:
                image_range = [0, num_images - 1]
            for i in range(num_images):
                if i < image_range[0] or i > image_range[1]:
                    continue
                img = cv2.imread(os.path.join(data_path, '{}/img1/{:06d}.jpg'.format(seq, i + 1)))
                height, width = img.shape[:2]
                image_info = {'file_name': '{}/img1/{:06d}.jpg'.format(seq, i + 1),  # image name.
                              'id': image_cnt + i + 1,  # image number in the entire training set.
                              'frame_id': i + 1 - image_range[0],  # image number in the video sequence, starting from 1.
                              'prev_image_id': image_cnt + i if i > 0 else -1,  # image number in the entire training set.
                              'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1,
                              'video_id': video_cnt,
                              'height': height, 'width': width}
                out['images'].append(image_info)
            print('{}: {} images'.format(seq, num_images))
            if split != 'test':
                det_path = os.path.join(seq_path, 'det/det.txt')
                # gt.txt columns: frame, track id, x, y, w, h, active flag, class, visibility.
                anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')
                dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')
                if CREATE_SPLITTED_ANN and ('half' in split):
                    anns_out = np.array([anns[i] for i in range(anns.shape[0])
                                         if int(anns[i][0]) - 1 >= image_range[0] and
                                         int(anns[i][0]) - 1 <= image_range[1]], np.float32)
                    anns_out[:, 0] -= image_range[0]
                    gt_out = os.path.join(seq_path, 'gt/gt_{}.txt'.format(split))
                    fout = open(gt_out, 'w')
                    for o in anns_out:
                        fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\n'.format(
                            int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]),
                            int(o[6]), int(o[7]), o[8]))
                    fout.close()
                if CREATE_SPLITTED_DET and ('half' in split):
                    dets_out = np.array([dets[i] for i in range(dets.shape[0])
                                         if int(dets[i][0]) - 1 >= image_range[0] and
                                         int(dets[i][0]) - 1 <= image_range[1]], np.float32)
                    dets_out[:, 0] -= image_range[0]
                    det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split))
                    dout = open(det_out, 'w')
                    for o in dets_out:
                        dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\n'.format(
                            int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]),
                            float(o[6])))
                    dout.close()
                print('{} ann images'.format(int(anns[:, 0].max())))
                for i in range(anns.shape[0]):
                    frame_id = int(anns[i][0])
                    if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]:
                        continue
                    track_id = int(anns[i][1])
                    cat_id = int(anns[i][7])
                    ann_cnt += 1
                    if not ('15' in DATA_PATH):  # MOT15 gt files do not carry usable class/visibility flags.
                        # if not (float(anns[i][8]) >= 0.25):  # visibility.
                        #     continue
                        if not (int(anns[i][6]) == 1):  # whether to ignore.
                            continue
                        if int(anns[i][7]) in [3, 4, 5, 6, 9, 10, 11]:  # non-person
                            continue
                        if int(anns[i][7]) in [2, 7, 8, 12]:  # ignored person
                            category_id = -1
                        else:
                            category_id = 1  # pedestrian (non-static)
                            if not track_id == tid_last:
                                tid_curr += 1
                                tid_last = track_id
                    else:
                        category_id = 1
                    ann = {'id': ann_cnt,
                           'category_id': category_id,
                           'image_id': image_cnt + frame_id,
                           'track_id': tid_curr,
                           'bbox': anns[i][2:6].tolist(),
                           'conf': float(anns[i][6]),
                           'iscrowd': 0,
                           'area': float(anns[i][4] * anns[i][5])}
                    out['annotations'].append(ann)
            image_cnt += num_images
            print(tid_curr, tid_last)
        print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))
        json.dump(out, open(out_path, 'w'))
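After the conversion finishes, the output can be sanity-checked by loading it back; a small sketch, assuming pycocotools is installed:

# Load a generated annotation file and print basic counts.
from pycocotools.coco import COCO

coco = COCO('E:/wode/code/YOLOX_deepsort_tracker-master/datasets/mot/annotations/train_half.json')
print(len(coco.getImgIds()), 'images,', len(coco.getAnnIds()), 'annotations')
print(coco.loadCats(coco.getCatIds()))  # expect [{'id': 1, 'name': 'pedestrian'}]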

2. Modifications

(1) Modify coco_classes
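In YOLOX the class-name tuple lives in yolox/data/datasets/coco_classes.py. A minimal sketch for the single-class pedestrian setup produced by the conversion above (substitute your own class names):

# yolox/data/datasets/coco_classes.py
# Replace the 80 default COCO names with your own classes; here a single
# "pedestrian" class, matching the MOT conversion above.
COCO_CLASSES = (
    "pedestrian",
)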

(2) Modify the paths and num_classes
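The dataset paths and the number of classes are set in your Exp file; a sketch modeled on YOLOX's exps/example/custom/yolox_s.py, with paths assumed to match the conversion script above:

# exps/example/custom/yolox_s.py (paths are illustrative)
import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.33
        self.width = 0.50
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        # Point at your own dataset root and the annotation files
        # written by the conversion script.
        self.data_dir = "datasets/mot"
        self.train_ann = "train_half.json"
        self.val_ann = "val_half.json"
        self.num_classes = 1  # a single pedestrian class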

(3) Rewrite the dataset
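MOT frames do not follow the default train2017/ layout (the JSON's file_name entries look like <seq>/img1/000001.jpg), so the dataset constructed in the Exp's data loader must be pointed at the right sub-folder and annotation file. A minimal sketch; COCODataset's exact arguments vary slightly across YOLOX versions:

from yolox.data import COCODataset

# COCODataset reads annotations from <data_dir>/annotations/<json_file>
# and resolves each image as <data_dir>/<name>/<file_name>.
dataset = COCODataset(
    data_dir="datasets/mot",      # dataset root used by the conversion script
    json_file="train_half.json",  # annotation file produced above
    name="train",                 # image sub-dir: train/<seq>/img1/*.jpg
    img_size=(640, 640),
)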

      

3. Training

python tools/train.py -f /path/to/your/Exp/file -d 8 -b 64 -c /path/to/the/pretrained/weights

Here -f selects the Exp file, -d the number of GPUs, -b the total batch size, and -c the pretrained weights to start from.

Notes:

(1) Resuming a previous training run

Edit train.py:

set resume to True;

point ckpt at the checkpoint saved after the last epoch;

set start_epoch to the epoch you want to resume from.

The relevant argument definitions become:
parser.add_argument(
    # "--resume", default=False, action="store_true", help="resume training"
    "--resume", default=True, action="store_true", help="resume training"
)
parser.add_argument(
    "-c", "--ckpt",
    # default='pretrained/yolox_s.pth.tar',
    default='YOLOX_outputs/yolox_s/latest_ckpt.pth.tar',
    type=str, help="checkpoint file"
)
parser.add_argument(
    "-e",
    "--start_epoch",
    # default=None,
    default=2,
    type=int,
    help="resume training start epoch",
)
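Alternatively, the same settings can be passed on the command line without editing the defaults (using the flags defined above):

python tools/train.py -f /path/to/your/Exp/file -d 8 -b 64 --resume -c YOLOX_outputs/yolox_s/latest_ckpt.pth.tar -e 2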

(2) Visualizing the loss with TensorBoard

In the terminal, run tensorboard --logdir=YOLOX_outputs/yolox_s/ --host=127.0.0.1

and open the link it prints.

--logdir is the folder containing the data to visualize.

If you see: tensorboard : The term 'tensorboard' is not recognized as the name of a cmdlet, function, script file, or operable program

check whether the active conda environment is the one you actually use.

PyCharm's terminal opens in the base environment by default; activate your own environment first (e.g., conda activate <your-env>), then run the tensorboard command.

If activate cannot switch to the right environment in PyCharm's terminal, go to File -> Settings -> Tools -> Terminal and change the shell path there (typically from PowerShell to cmd.exe, under which conda activate works).

Save, then close and reopen PyCharm.

(3) OSError: [WinError 1455] The paging file is too small for this operation to complete

The error says the Windows paging file is too small, so it needs to be enlarged. By default, Windows only allocates virtual memory on the C: drive; if Anaconda is installed on another drive, allocate virtual memory on that drive as well. If it is installed on C:, simply increase the existing virtual-memory size.

This PC -> Properties -> Advanced system settings -> (Performance) Settings -> Advanced -> Virtual memory -> Change; increase the paging-file size for the drive in question.

Restart the computer.
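If enlarging the paging file is not enough, a commonly reported mitigation for WinError 1455 is to reduce the number of DataLoader worker processes, since each worker maps its own copy of the large CUDA/MKL DLLs. In YOLOX this is the Exp attribute data_num_workers; a sketch continuing the Exp file from section 2:

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        # Fewer (or zero) DataLoader workers shrinks the per-process
        # virtual-memory footprint that triggers WinError 1455.
        self.data_num_workers = 0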

Original post: https://blog.csdn.net/ZZhangYajuan/article/details/127085260