• 基于轻量级卷积神经网络CNN开发构建打架斗殴识别分析系统


    在很多公共场合中,因为一些不可控因素导致最终爆发打架斗殴或者大规模冲突事件的案例层出不穷,基于视频监控等技术手段智能自动化地识别出已有或者潜在的危险行为对于维护公共场合的安全稳定有着重要的意义。本文的核心目的就是想要基于CNN模型来尝试开发构建公众场景下的打架斗殴行为识别系统,这里我们从互联网中采集了相当批量的聚众数据,首先看下效果:
     

    接下来看下数据集:

    【有打架斗殴】

    【无打架斗殴】

    数据主要来源于互联网数据采集和后期的人工处理标注。

    可以借助我编写的数据集随机划分函数,便捷地生成训练集-测试集,如下所示:

    1. def randomSplit(dataDir="data/",train_ratio=0.80):
    2. """
    3. 数据集随机划分
    4. """
    5. if not os.path.exists("labels.json") or not os.path.exists("dataset.json"):
    6. pic_list=[]
    7. labels_list = os.listdir(dataDir)
    8. labels_list.sort()
    9. print("labels_list: ", labels_list)
    10. with open("labels.json", "w") as f:
    11. f.write(json.dumps(labels_list))
    12. for one_label in os.listdir(dataDir):
    13. oneDir=dataDir+one_label+"/"
    14. one_list=[oneDir+one for one in os.listdir(oneDir)]
    15. pic_list+=one_list
    16. length=len(pic_list)
    17. print("length: ", length)
    18. train_num=int(length*train_ratio)
    19. test_num=length-train_num
    20. print("train_num: ", train_num, ", test_num: ", test_num)
    21. train_list=random.sample(pic_list, train_num)
    22. test_list=[one for one in pic_list if one not in train_list]
    23. dataset={}
    24. dataset["train"]=train_list
    25. dataset["test"]=test_list
    26. with open("dataset.json","w") as f:
    27. f.write(json.dumps(dataset))
    28. else:
    29. print("Not Need To SplitData Again!!!!!")

    接下来需要加载本地图像数据来解析创建可直接用于后续模型训练计算的数据集,核心实现如下所示:

    1. def buildDataset():
    2. """
    3. 加载本地数据集创建数据集
    4. """
    5. X_train, y_train = [], []
    6. X_test, y_test = [], []
    7. train_list=dataset["train"]
    8. test_list=dataset["test"]
    9. picDir = "data/train/"
    10. #训练集
    11. for one_path in train_list:
    12. try:
    13. print("one_path: ", one_path)
    14. one_img = parse4Img(one_path)
    15. one_y = parse4Label(one_pic_classes,labels_list)
    16. X_train.append(one_img)
    17. y_train.append(one_y)
    18. except Exception as e:
    19. print("train Exception: ", e)
    20. X_train = np.array(X_train)
    21. #测试集
    22. for one_path in test_list:
    23. try:
    24. print("one_path: ", one_path)
    25. one_img = parse4Img(one_path)
    26. one_y = parse4Label(one_pic_classes,labels_list)
    27. X_test.append(one_img)
    28. y_test.append(one_y)
    29. except Exception as e:
    30. print("test Exception: ", e)
    31. X_test = np.array(X_test)

    完成数据集的解析构建之后就可以进行模型的开发训练工作了。

    这部分可以参考我前面的博文的实现即可,这里就不再赘述了。本文中搭建的轻量级模型结构详情如下所示:

    1. {
    2. "class_name": "Sequential",
    3. "config": {
    4. "name": "sequential_1",
    5. "layers": [{
    6. "class_name": "Conv2D",
    7. "config": {
    8. "name": "conv2d_1",
    9. "trainable": true,
    10. "batch_input_shape": [null, 100, 100, 3],
    11. "dtype": "float32",
    12. "filters": 64,
    13. "kernel_size": [3, 3],
    14. "strides": [2, 2],
    15. "padding": "same",
    16. "data_format": "channels_last",
    17. "dilation_rate": [1, 1],
    18. "activation": "relu",
    19. "use_bias": true,
    20. "kernel_initializer": {
    21. "class_name": "RandomUniform",
    22. "config": {
    23. "minval": -0.05,
    24. "maxval": 0.05,
    25. "seed": null
    26. }
    27. },
    28. "bias_initializer": {
    29. "class_name": "Zeros",
    30. "config": {}
    31. },
    32. "kernel_regularizer": null,
    33. "bias_regularizer": null,
    34. "activity_regularizer": null,
    35. "kernel_constraint": null,
    36. "bias_constraint": null
    37. }
    38. }, {
    39. "class_name": "MaxPooling2D",
    40. "config": {
    41. "name": "max_pooling2d_1",
    42. "trainable": true,
    43. "pool_size": [2, 2],
    44. "padding": "valid",
    45. "strides": [2, 2],
    46. "data_format": "channels_last"
    47. }
    48. }, {
    49. "class_name": "Conv2D",
    50. "config": {
    51. "name": "conv2d_2",
    52. "trainable": true,
    53. "filters": 128,
    54. "kernel_size": [3, 3],
    55. "strides": [2, 2],
    56. "padding": "same",
    57. "data_format": "channels_last",
    58. "dilation_rate": [1, 1],
    59. "activation": "relu",
    60. "use_bias": true,
    61. "kernel_initializer": {
    62. "class_name": "RandomUniform",
    63. "config": {
    64. "minval": -0.05,
    65. "maxval": 0.05,
    66. "seed": null
    67. }
    68. },
    69. "bias_initializer": {
    70. "class_name": "Zeros",
    71. "config": {}
    72. },
    73. "kernel_regularizer": null,
    74. "bias_regularizer": null,
    75. "activity_regularizer": null,
    76. "kernel_constraint": null,
    77. "bias_constraint": null
    78. }
    79. }, {
    80. "class_name": "MaxPooling2D",
    81. "config": {
    82. "name": "max_pooling2d_2",
    83. "trainable": true,
    84. "pool_size": [2, 2],
    85. "padding": "valid",
    86. "strides": [2, 2],
    87. "data_format": "channels_last"
    88. }
    89. }, {
    90. "class_name": "Conv2D",
    91. "config": {
    92. "name": "conv2d_3",
    93. "trainable": true,
    94. "filters": 256,
    95. "kernel_size": [3, 3],
    96. "strides": [2, 2],
    97. "padding": "same",
    98. "data_format": "channels_last",
    99. "dilation_rate": [1, 1],
    100. "activation": "relu",
    101. "use_bias": true,
    102. "kernel_initializer": {
    103. "class_name": "RandomUniform",
    104. "config": {
    105. "minval": -0.05,
    106. "maxval": 0.05,
    107. "seed": null
    108. }
    109. },
    110. "bias_initializer": {
    111. "class_name": "Zeros",
    112. "config": {}
    113. },
    114. "kernel_regularizer": null,
    115. "bias_regularizer": null,
    116. "activity_regularizer": null,
    117. "kernel_constraint": null,
    118. "bias_constraint": null
    119. }
    120. }, {
    121. "class_name": "MaxPooling2D",
    122. "config": {
    123. "name": "max_pooling2d_3",
    124. "trainable": true,
    125. "pool_size": [2, 2],
    126. "padding": "valid",
    127. "strides": [2, 2],
    128. "data_format": "channels_last"
    129. }
    130. }, {
    131. "class_name": "Flatten",
    132. "config": {
    133. "name": "flatten_1",
    134. "trainable": true,
    135. "data_format": "channels_last"
    136. }
    137. }, {
    138. "class_name": "Dense",
    139. "config": {
    140. "name": "dense_1",
    141. "trainable": true,
    142. "units": 256,
    143. "activation": "relu",
    144. "use_bias": true,
    145. "kernel_initializer": {
    146. "class_name": "VarianceScaling",
    147. "config": {
    148. "scale": 1.0,
    149. "mode": "fan_avg",
    150. "distribution": "uniform",
    151. "seed": null
    152. }
    153. },
    154. "bias_initializer": {
    155. "class_name": "Zeros",
    156. "config": {}
    157. },
    158. "kernel_regularizer": null,
    159. "bias_regularizer": null,
    160. "activity_regularizer": null,
    161. "kernel_constraint": null,
    162. "bias_constraint": null
    163. }
    164. }, {
    165. "class_name": "Dropout",
    166. "config": {
    167. "name": "dropout_1",
    168. "trainable": true,
    169. "rate": 0.1,
    170. "noise_shape": null,
    171. "seed": null
    172. }
    173. }, {
    174. "class_name": "Dense",
    175. "config": {
    176. "name": "dense_2",
    177. "trainable": true,
    178. "units": 512,
    179. "activation": "relu",
    180. "use_bias": true,
    181. "kernel_initializer": {
    182. "class_name": "VarianceScaling",
    183. "config": {
    184. "scale": 1.0,
    185. "mode": "fan_avg",
    186. "distribution": "uniform",
    187. "seed": null
    188. }
    189. },
    190. "bias_initializer": {
    191. "class_name": "Zeros",
    192. "config": {}
    193. },
    194. "kernel_regularizer": null,
    195. "bias_regularizer": null,
    196. "activity_regularizer": null,
    197. "kernel_constraint": null,
    198. "bias_constraint": null
    199. }
    200. }, {
    201. "class_name": "Dropout",
    202. "config": {
    203. "name": "dropout_2",
    204. "trainable": true,
    205. "rate": 0.15,
    206. "noise_shape": null,
    207. "seed": null
    208. }
    209. }, {
    210. "class_name": "Dense",
    211. "config": {
    212. "name": "dense_3",
    213. "trainable": true,
    214. "units": 2,
    215. "activation": "softmax",
    216. "use_bias": true,
    217. "kernel_initializer": {
    218. "class_name": "VarianceScaling",
    219. "config": {
    220. "scale": 1.0,
    221. "mode": "fan_avg",
    222. "distribution": "uniform",
    223. "seed": null
    224. }
    225. },
    226. "bias_initializer": {
    227. "class_name": "Zeros",
    228. "config": {}
    229. },
    230. "kernel_regularizer": null,
    231. "bias_regularizer": null,
    232. "activity_regularizer": null,
    233. "kernel_constraint": null,
    234. "bias_constraint": null
    235. }
    236. }]
    237. },
    238. "keras_version": "2.2.4",
    239. "backend": "tensorflow"
    240. }

    默认200次epoch的迭代计算,训练完成后,对loss曲线和acc曲线进行了对比可视化展示,如下所示:

    【loss对比曲线】

    【acc对比曲线】

    可以看到:模型的效果还是很不错的。

    最后编写专用的可视化系统界面,进行实例化推理展示,实例结果如下所示:

    后续的工作考虑结合视频连续帧的特点来进一步提升打架斗殴行为的识别精度,感兴趣的话也都可以自行尝试实践一下。

  • 相关阅读:
    Android 监听WebView加载失败
    【总线】AXI第十课时:AXI协议的Ordering Model 使用ID tag
    二叉树的基本认识(三)
    OFDM 十六讲 2- OFDM and the DFT
    SpringCloud 组件Gateway服务网关【全局过滤器】
    VivadoAndTcl: synth_ip
    Ant Design Table 单元格文字过长用省略号表示
    隐私计算 FATE - 多分类神经网络算法测试
    GD32F103 硬件 IIC
    PyTorch深度解析:Tensor——神经网络的核心构建块
  • 原文地址:https://blog.csdn.net/Together_CZ/article/details/134318464