• DeepStream: saving detection-result screenshots


    Contents

    1. How to access GstBuffer data

    2.2 Modifying the Makefile

    pgie_pad_buffer_probe: grabbing the frame and converting it to an OpenCV Mat

    sgie: getting the frame data and converting it to an OpenCV Mat


    The following content is reposted from:

    DeepStream: saving video inference result screenshots — hello_dear_you's blog on CSDN

    It is based on answers to related questions on the DeepStream forum; see the following three threads for details:

    1. Access frame pointer in deepstream-app
    2. Frame data extraction and pipeline halts/hangs after 7 minutes of run
    3. Semantic Segmentation in DS 4
    Related write-ups can also be found on CSDN:

    1. DeepStream with OpenCV4 for video analysis and screenshots (Part 1)

    2. DeepStream with OpenCV4 for video analysis and screenshots (Part 2)

    3. Installing Nvidia DeepStream on Ubuntu 18.04.3 and decoding, extracting, and saving frames on a P4 GPU

    In addition, the following OpenCV material on handling RGBA images was consulted:

    1. Getting started with OpenCV (4): handling transparent (RGBA) images

    2. OpenCV image reading and saving issues

    1. How to access GstBuffer data


    From the analysis above, we know that the image can be saved inside a probe callback. Depending on the requirement, the probe should be attached to the pad of the appropriate element. Based on whether the data is grabbed before or after the OSD, there are two cases:

    Saving data before the OSD element: attach to the src pad of nvinfer (buffer type NV12) or the src pad of nvvideoconvert (buffer type RGBA).
    Saving data after the OSD element: attach to the src pad of nvdsosd (buffer type RGBA).
    The difference between the two is whether the detection results (boxes, labels) have already been drawn onto the frame itself.
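
    For illustration, a minimal sketch of attaching such a probe, following the standard DeepStream sample apps (the nvdsosd element's sink pad is used here; osd_sink_pad_buffer_probe is the callback shown below):

    GstPad *osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
    if (!osd_sink_pad)
        g_print ("Unable to get sink pad\n");
    else
        gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
            osd_sink_pad_buffer_probe, NULL, NULL);
    gst_object_unref (osd_sink_pad);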

    The following uses the osd_sink_pad_buffer_probe callback as an example of how to access the GstBuffer data:

    /* osd_sink_pad_buffer_probe will extract metadata received on the OSD sink pad
     * and update params for drawing rectangle, object information etc. */

    static GstPadProbeReturn
    osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
        gpointer u_data)
    {
        GstBuffer *buf = (GstBuffer *) info->data;
        GstMapInfo in_map_info;   /* was missing in the original snippet */

        memset (&in_map_info, 0, sizeof (in_map_info));
        if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ))
        {
            g_print ("Error: Failed to map gst buffer\n");
            return GST_PAD_PROBE_OK;   /* nothing was mapped, so do not unmap */
        }
        /* The mapped data of an NVMM buffer is an NvBufSurface */
        NvBufSurface *surface = (NvBufSurface *) in_map_info.data;

        /* ... access the frames via surface->surfaceList[...] here ... */

        gst_buffer_unmap (buf, &in_map_info);
        return GST_PAD_PROBE_OK;
    }
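
    The snippet above stops at obtaining the NvBufSurface. To actually save a frame from there, the surface still has to be mapped for CPU access and wrapped in a cv::Mat. A minimal sketch, assuming the upstream buffer is RGBA (e.g. after nvvideoconvert) and a frame_meta obtained from the batch metadata:

    /* Sketch only: assumes an RGBA NVMM buffer and a valid frame_meta->batch_id. */
    if (NvBufSurfaceMap (surface, frame_meta->batch_id, 0, NVBUF_MAP_READ) == 0)
    {
        NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);   /* make the CPU view coherent */
        cv::Mat rgba (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
                      surface->surfaceList[frame_meta->batch_id].planeParams.width[0],
                      CV_8UC4,
                      surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
                      surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
        cv::Mat bgr;
        cv::cvtColor (rgba, bgr, cv::COLOR_RGBA2BGR);                /* imwrite expects BGR */
        cv::imwrite ("frame.jpg", bgr);
        NvBufSurfaceUnMap (surface, frame_meta->batch_id, 0);
    }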

    2.2 Modifying the Makefile


    1. Add the OpenCV include path:

    CFLAGS+= -I/opt/nvidia/deepstream/deepstream-5.1/sources/includes \
             -I /usr/local/cuda-$(CUDA_VER)/include \
             -fPIC -std=c++17 -g    \
             -I/usr/local/opencv-4.5.1/include/opencv4
    2. Add the corresponding library flags:

    -lnvbufsurface -lnvbufsurftransform
    -L/usr/local/opencv-4.5.1/lib -lopencv_core -lopencv_imgproc -lopencv_imgcodecs

    so that the full LIBS line becomes:
    LIBS+= -L$(LIB_INSTALL_DIR) -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper -lm  -lnvbufsurface -lnvbufsurftransform \
           -L/usr/local/cuda-$(CUDA_VER)/lib64/ -lcudart \
           -lcuda -Wl,-rpath,$(LIB_INSTALL_DIR)    \
           -L/usr/local/opencv-4.5.1/lib -lopencv_core -lopencv_imgproc -lopencv_imgcodecs
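
    Putting the fragments together, a minimal sketch of the modified sample Makefile (the CUDA_VER default, the target name, and the pkg-config lines are assumptions; OpenCV is a C++ API, so the sources must be compiled as C++ with g++):

    CUDA_VER?=10.2
    APP:= deepstream-screenshot-app        # hypothetical target name
    CXX:= g++                              # C++ compiler, required for OpenCV
    LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-5.1/lib/

    CFLAGS+= -I/opt/nvidia/deepstream/deepstream-5.1/sources/includes \
             -I /usr/local/cuda-$(CUDA_VER)/include \
             -fPIC -std=c++17 -g \
             -I/usr/local/opencv-4.5.1/include/opencv4
    CFLAGS+= $(shell pkg-config --cflags gstreamer-1.0)

    LIBS+= -L$(LIB_INSTALL_DIR) -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper -lm -lnvbufsurface -lnvbufsurftransform \
           -L/usr/local/cuda-$(CUDA_VER)/lib64/ -lcudart \
           -lcuda -Wl,-rpath,$(LIB_INSTALL_DIR) \
           -L/usr/local/opencv-4.5.1/lib -lopencv_core -lopencv_imgproc -lopencv_imgcodecs
    LIBS+= $(shell pkg-config --libs gstreamer-1.0)

    SRCS:= $(wildcard *.cpp)
    OBJS:= $(SRCS:.cpp=.o)

    %.o: %.cpp            # recipe lines must be indented with a tab
    	$(CXX) -c -o $@ $(CFLAGS) $<

    $(APP): $(OBJS)
    	$(CXX) -o $(APP) $(OBJS) $(LIBS)

    With that in place, running make CUDA_VER=10.2 (matching the installed CUDA version) builds the app.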
    With the above changes in place, we can build the app and save image frames with the detection results drawn on them.
     

    Original link: https://blog.csdn.net/hello_dear_you/article/details/121687664

    pgie_pad_buffer_probe: grabbing the frame and converting it to an OpenCV Mat

    The complete application source follows; the probe itself is pgie_pad_buffer_probe.

    /* NOTE: the blog's HTML stripped the <...> part of these includes; the headers
     * below are a plausible reconstruction based on what the code uses. */
    #include <gst/gst.h>
    #include <glib.h>
    #include <stdio.h>
    #include <math.h>
    #include <string.h>
    #include <sys/time.h>
    #include <string>
    #include <vector>
    #include <cstring>
    #include "gst-nvmessage.h"
    #include "gstnvdsmeta.h"
    #include "nvdsmeta_schema.h"
    #include "nvdsinfer_custom_impl.h"
    #include "nvbufsurface.h"
    #include <opencv2/opencv.hpp>
    #include "cuda_runtime_api.h"
    #include "gstnvdsinfer.h"
    #include <iostream>
    #include "nvds_parse_bbox_wpod.h"
    #include "gst-nvdssr.h"
    #define OUTPUT_FILE "output/out.mp4"
    #define INTERVAL 2
    #define OSD_PROCESS_MODE 0
    #define SGIE_INPUT_H 32
    #define SGIE_INPUT_W 100
    #define SGIE_OUTPUT_SIZE 26 * 37
    #define SGIE_PAD_OUT 8
    /* By default, OSD will not display text. To display text, change this to 1 */
    #define OSD_DISPLAY_TEXT 1
    #define PGIE_NET_WIDTH 256
    #define PGIE_NET_HEIGHT 256
    #define MUXER_OUTPUT_WIDTH 1920
    #define MUXER_OUTPUT_HEIGHT 1080
    #define TILED_OUTPUT_WIDTH 1920
    #define TILED_OUTPUT_HEIGHT 1080
    #define MUXER_BATCH_TIMEOUT_USEC 25000
    #define MAX_DISPLAY_LEN 64
    #define MEMORY_FEATURES "memory:NVMM"
    #define PGIE_CONFIG_FILE "config_pgie.txt"
    #define SGIE_CONFIG_FILE "config_sgie.txt"
    #define START_TIME 0
    #define SHOW_CRNN 0
    const std::string alphabet = "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    // ./deepstream-ALPR file:///opt/nvidia/deepstream/deepstream-5.0/sources/alpr_ds/wpod/deepstream_app_wpod/test1.mp4
    /* Duration of recording */
    bool display = FALSE;
    /* The template arguments below were stripped by the blog's formatting.
     * outputLayersInfo is clearly std::vector<NvDsInferLayerInfo>; the detection
     * type (called WpodObject here as a hypothetical name) is a custom struct with
     * left/top/width/height and a pts[3][4] corner array, declared in
     * nvds_parse_bbox_wpod.h. */
    extern "C" bool NvDsInferParseCustomWpod(
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<WpodObject> &objectList);
    /* Greedy CTC decoding: raw keeps every timestep's argmax; otherwise repeated
     * characters and blanks (index 0) are collapsed. */
    std::string strDecode(std::vector<int> &preds, bool raw)
    {
        std::string str;
        if (raw)
        {
            for (auto v : preds)
            {
                str.push_back(alphabet[v]);
            }
        }
        else
        {
            for (size_t i = 0; i < preds.size(); i++)
            {
                if (preds[i] == 0 || (i > 0 && preds[i - 1] == preds[i]))
                    continue;
                str.push_back(alphabet[preds[i]]);
            }
        }
        return str;
    }
    static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data)
    {
        GMainLoop *loop = (GMainLoop *)data;
        switch (GST_MESSAGE_TYPE(msg))
        {
        case GST_MESSAGE_EOS:
        {
            g_print("End of stream\n");
            g_main_loop_quit(loop);
            break;
        }
        case GST_MESSAGE_WARNING:
        {
            gchar *debug;
            GError *error;
            gst_message_parse_warning(msg, &error, &debug);
            g_printerr("WARNING from element %s: %s\n", GST_OBJECT_NAME(msg->src), error->message);
            g_free(debug);
            g_printerr("Warning: %s\n", error->message);
            g_error_free(error);
            break;
        }
        case GST_MESSAGE_ERROR:
        {
            gchar *debug;
            GError *error;
            gst_message_parse_error(msg, &error, &debug);
            g_printerr("ERROR from element %s: %s\n",
                       GST_OBJECT_NAME(msg->src), error->message);
            if (debug)
                g_printerr("Error details: %s\n", debug);
            g_free(debug);
            g_error_free(error);
            g_main_loop_quit(loop);
            break;
        }
    #ifdef PLATFORM_TEGRA
        case GST_MESSAGE_ELEMENT:
        {
            if (gst_nvmessage_is_stream_eos(msg))
            {
                guint stream_id;
                if (gst_nvmessage_parse_stream_eos(msg, &stream_id))
                {
                    g_print("Got EOS from stream %d\n", stream_id);
                }
            }
            break;
        }
    #endif
        default:
            break;
        }
        return TRUE;
    }
    static GstPadProbeReturn pgie_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
    {
        NvBufSurface *surface = NULL;
        GstBuffer *inbuf = GST_PAD_PROBE_INFO_BUFFER(info);
        GstMapInfo in_map_info;
        memset(&in_map_info, 0, sizeof(in_map_info));
        if (!gst_buffer_map(inbuf, &in_map_info, GST_MAP_READ))
        {
            g_error("Error: Failed to map gst buffer\n");
        }
        surface = (NvBufSurface *)in_map_info.data;
        NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(inbuf);
        static guint use_device_mem = 0;
        static NvDsInferNetworkInfo networkInfo{3, PGIE_NET_WIDTH, PGIE_NET_HEIGHT};
        NvDsInferParseDetectionParams detectionParams;
    #ifndef PLATFORM_TEGRA
        // if (surface->memType != NVBUF_MEM_CUDA_UNIFIED)
        // {
        //     g_error("need NVBUF_MEM_CUDA_UNIFIED memory for opencv\n");
        // }
    #endif
        /* Iterate each frame metadata in batch */
        for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next)
        {
            NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
            cv::Mat in_mat;
            /* Map the surface for CPU access if it is not mapped already */
            if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL)
            {
                if (NvBufSurfaceMap(surface, frame_meta->batch_id, 0, NVBUF_MAP_READ_WRITE) != 0)
                {
                    g_error("buffer map to be accessed by CPU failed\n");
                }
            }
            /* Cache the mapped data for CPU access */
            NvBufSurfaceSyncForCpu(surface, frame_meta->batch_id, 0);
            /* Wrap the mapped RGBA plane in a cv::Mat (no copy) */
            in_mat = cv::Mat(surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
                             surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
                             surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
                             surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
            // cv::imwrite("output/in_im.jpg", in_mat);
            // cv::Mat bgr_frame = cv::Mat(cv::Size(surface->surfaceList[frame_meta->batch_id].planeParams.height[0], surface->surfaceList[frame_meta->batch_id].planeParams.width[0]), CV_8UC3);
            // cv::cvtColor(in_mat, bgr_frame, cv::COLOR_RGBA2BGR);
            // cv::imwrite("output/a.jpg", bgr_frame);
            /* Iterate user metadata in frames to search PGIE's tensor metadata */
            for (NvDsMetaList *l_user = frame_meta->frame_user_meta_list; l_user != NULL; l_user = l_user->next)
            {
                NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
                if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
                    continue;
                /* convert to tensor metadata */
                NvDsInferTensorMeta *meta = (NvDsInferTensorMeta *)user_meta->user_meta_data;
                for (unsigned int i = 0; i < meta->num_output_layers; i++)
                {
                    NvDsInferLayerInfo *info = &meta->output_layers_info[i];
                    info->buffer = meta->out_buf_ptrs_host[i];
                    if (use_device_mem && meta->out_buf_ptrs_dev[i])
                    {
                        cudaMemcpy(meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
                                   info->inferDims.numElements * 4, cudaMemcpyDeviceToHost);
                    }
                }
                // g_print("\n1 %d\n", meta->num_output_layers);
                /* Parse output tensor and fill detection results into objectList.
                 * (template arguments restored; WpodObject is the hypothetical name
                 * used above for the custom struct from nvds_parse_bbox_wpod.h) */
                std::vector<NvDsInferLayerInfo> outputLayersInfo(meta->output_layers_info,
                                                                 meta->output_layers_info + meta->num_output_layers);
                std::vector<WpodObject> objectList;
    #if NVDS_VERSION_MAJOR >= 5
                /* nvds_lib_major_version is a global filled via nvds_version() in the
                 * NVIDIA deepstream-infer-tensor-meta sample this code is based on */
                if (nvds_lib_major_version >= 5)
                {
                    if (meta->network_info.width != networkInfo.width ||
                        meta->network_info.height != networkInfo.height ||
                        meta->network_info.channels != networkInfo.channels)
                    {
                        g_error("failed to check pgie network info\n");
                    }
                }
    #endif
                NvDsInferParseCustomWpod(outputLayersInfo, networkInfo,
                                         detectionParams, objectList);
                for (auto &obj : objectList)
                {
                    NvDsObjectMeta *obj_meta = nvds_acquire_obj_meta_from_pool(batch_meta);
                    obj_meta->unique_component_id = meta->unique_id;
                    obj_meta->confidence = 0.0;
                    /* This is an untracked object. Set tracking_id to -1. */
                    obj_meta->object_id = UNTRACKED_OBJECT_ID;
                    obj_meta->class_id = 0;
                    // g_print("\nleft:%f\ttop:%f\twidth:%f\theight:%f", obj.left, obj.top, obj.width, obj.height);
                    /* Scale the detection from network resolution to muxer resolution */
                    cv::Rect plate_rect = cv::Rect(obj.left * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH, obj.top * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT, obj.width * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH, obj.height * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
                    // cv::Mat bgr = in_mat(plate_rect);
                    cv::Mat cropref = cv::Mat(in_mat, plate_rect);
                    cv::Mat crop;
                    cropref.copyTo(crop);
                    //cv::imwrite("output/" + std::to_string(idx) + "raw.jpg", crop);
                    /* Output size depends on the plate aspect ratio (two-row vs one-row plates) */
                    cv::Size out_size;
                    // bool type;
                    if (obj.width / obj.height < 1.7)
                    {
                        out_size.width = 280;
                        out_size.height = 200;
                        // type = 0;
                    }
                    else
                    {
                        out_size.width = 470;
                        out_size.height = 110;
                        // type = 1;
                    }
                    /* Build the homography H mapping the 4 detected plate corners to an
                     * upright rectangle. getRectPts / find_T_matrix are WPOD helper
                     * functions from the project's own sources. (the garbled
                     * declaration of H is restored below) */
                    std::vector<std::vector<float>> H = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
                    float t_pts[3][4];
                    getRectPts(t_pts, 0, 0, out_size.width, out_size.height);
                    float ptsh[3][4];
                    for (int j = 0; j < 4; j++)
                    {
                        ptsh[0][j] = obj.pts[0][j] * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
                    }
                    for (int j = 0; j < 4; j++)
                    {
                        ptsh[1][j] = obj.pts[1][j] * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;
                    }
                    for (int i = 0; i < 4; i++)
                    {
                        ptsh[2][i] = 1.0;
                    }
                    find_T_matrix(t_pts, ptsh, H);
                    float Hf[3][3];
                    for (int i = 0; i < 3; i++)
                    {
                        for (int j = 0; j < 3; j++)
                        {
                            Hf[i][j] = H[i][j];
                        }
                    }
                    cv::Mat H1(3, 3, CV_32F);
                    std::memcpy(H1.data, Hf, 3 * 3 * sizeof(float));
                    /* Rectify the plate and paste it into a corner of the frame so the
                     * SGIE can pick it up via sgie_rect_params */
                    cv::Mat tmp_img;
                    cv::warpPerspective(in_mat, tmp_img, H1, out_size, 1, 0);
                    //cv::imwrite("output/im" + std::to_string(idx) + ".jpg", tmp_img);
                    cv::resize(tmp_img, tmp_img, cv::Size(SGIE_INPUT_W, SGIE_INPUT_H));
                    tmp_img.copyTo(in_mat(cv::Rect(SGIE_PAD_OUT, SGIE_PAD_OUT, SGIE_INPUT_W, SGIE_INPUT_H)));
                    /* Assign bounding box coordinates. */
                    NvOSD_RectParams &rect_params = obj_meta->rect_params;
                    NvOSD_TextParams &text_params = obj_meta->text_params;
                    /* sgie_rect_params is a project-specific field added to NvDsObjectMeta;
                     * it is not part of the stock DeepStream metadata. */
                    NvOSD_RectParams &sgie_rect_params = obj_meta->sgie_rect_params;
                    rect_params.left = obj.left * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
                    rect_params.top = obj.top * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;
                    rect_params.width = obj.width * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
                    rect_params.height = obj.height * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;
                    rect_params.border_width = 3;
                    rect_params.has_bg_color = 0;
                    rect_params.border_color = (NvOSD_ColorParams){0, 1, 0, 1};
                    /* SGIE bounding box */
                    sgie_rect_params.left = SGIE_PAD_OUT;
                    sgie_rect_params.top = SGIE_PAD_OUT;
                    sgie_rect_params.width = SGIE_INPUT_W;
                    sgie_rect_params.height = SGIE_INPUT_H;
                    /* display_text requires heap allocated memory. */
                    // text_params.display_text = g_strdup(type ? "Bien 1 hang" : "Bien 2 hang");
                    /* Display text above the left top corner of the object. */
                    text_params.x_offset = rect_params.left;
                    text_params.y_offset = rect_params.top - 10;
                    /* Set black background for the text. */
                    text_params.set_bg_clr = 1;
                    text_params.text_bg_clr = (NvOSD_ColorParams){0, 0, 0, 1};
                    /* Font face, size and color. */
                    text_params.font_params.font_name = (gchar *)"Serif";
                    text_params.font_params.font_size = 20;
                    text_params.font_params.font_color = (NvOSD_ColorParams){1, 1, 1, 1};
                    nvds_add_obj_meta_to_frame(frame_meta, obj_meta, NULL);
                    /* Write the CPU-side modifications back to device memory */
                    NvBufSurfaceSyncForDevice(surface, frame_meta->batch_id, 0);
                }
            }
            NvBufSurfaceUnMap(surface, frame_meta->batch_id, 0);
        }
        use_device_mem = 1 - use_device_mem;
        gst_buffer_unmap(inbuf, &in_map_info);
        return GST_PAD_PROBE_OK;
    }
    static GstPadProbeReturn
    sgie_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
    {
        NvDsBatchMeta *batch_meta =
            gst_buffer_get_nvds_batch_meta(GST_BUFFER(info->data));
        static guint use_device_mem = 0;
        for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next)
        {
            NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
            for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
                 l_obj = l_obj->next)
            {
                NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)l_obj->data;
                for (NvDsMetaList *l_user = obj_meta->obj_user_meta_list; l_user != NULL;
                     l_user = l_user->next)
                {
                    NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
                    if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
                        continue;
                    /* convert to tensor metadata */
                    NvDsInferTensorMeta *meta = (NvDsInferTensorMeta *)user_meta->user_meta_data;
                    for (unsigned int i = 0; i < meta->num_output_layers; i++)
                    {
                        NvDsInferLayerInfo *info = &meta->output_layers_info[i];
                        info->buffer = meta->out_buf_ptrs_host[i];
                        if (use_device_mem && meta->out_buf_ptrs_dev[i])
                        {
                            cudaMemcpy(meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
                                       info->inferDims.numElements * 4, cudaMemcpyDeviceToHost);
                        }
                    }
                    /* template argument restored (stripped by the blog's formatting) */
                    std::vector<NvDsInferLayerInfo> outputLayersInfo(meta->output_layers_info,
                                                                     meta->output_layers_info + meta->num_output_layers);
                    /* Greedy argmax over the 26x37 CRNN output, then CTC-decode it */
                    float *prob = (float *)outputLayersInfo[0].buffer;
                    std::vector<int> preds;
                    for (int i = 0; i < 26; i++)
                    {
                        int maxj = 0;
                        for (int j = 1; j < 37; j++)
                        {
                            if (prob[37 * i + j] > prob[37 * i + maxj])
                                maxj = j;
                        }
                        preds.push_back(maxj);
                    }
                    std::string raw = strDecode(preds, true);
                    std::string sim = strDecode(preds, false);
                    if (SHOW_CRNN)
                    {
                        g_print("\nIdx = \nRaw : %s", raw.c_str());
                        g_print("\nSim : %s", sim.c_str());
                    }
                    else
                    {
                        obj_meta->text_params.display_text = g_strdup(sim.c_str());
                    }
                }
            }
        }
        return GST_PAD_PROBE_OK;
    }
    static void
    cb_newpad(GstElement *decodebin, GstPad *decoder_src_pad, gpointer data)
    {
        GstCaps *caps = gst_pad_get_current_caps(decoder_src_pad);
        const GstStructure *str = gst_caps_get_structure(caps, 0);
        const gchar *name = gst_structure_get_name(str);
        GstElement *source_bin = (GstElement *)data;
        GstCapsFeatures *features = gst_caps_get_features(caps, 0);
        if (!strncmp(name, "video", 5))
        {
            if (gst_caps_features_contains(features, MEMORY_FEATURES))
            {
                GstPad *bin_ghost_pad = gst_element_get_static_pad(source_bin, "src");
                if (!gst_ghost_pad_set_target(GST_GHOST_PAD(bin_ghost_pad),
                                              decoder_src_pad))
                {
                    g_printerr("Failed to link decoder src pad to source bin ghost pad\n");
                }
                gst_object_unref(bin_ghost_pad);
            }
            else
            {
                g_printerr("Error: Decodebin did not pick nvidia decoder plugin.\n");
            }
        }
    }
    static void decodebin_child_added(GstChildProxy *child_proxy, GObject *object,
                                      gchar *name, gpointer user_data)
    {
        g_print("Decodebin child added: %s\n", name);
        if (g_strrstr(name, "decodebin") == name)
        {
            g_signal_connect(G_OBJECT(object), "child-added",
                             G_CALLBACK(decodebin_child_added), user_data);
        }
    }
    static GstElement *
    create_source_bin(guint index, gchar *uri)
    {
        GstElement *bin = NULL, *uri_decode_bin = NULL;
        gchar bin_name[16] = {};
        g_snprintf(bin_name, 15, "source-bin-%02d", index);
        /* Create a source GstBin to abstract this bin's content from the rest of the
         * pipeline */
        bin = gst_bin_new(bin_name);
        /* Source element for reading from the uri.
         * We will use decodebin and let it figure out the container format of the
         * stream and the codec and plug the appropriate demux and decode plugins. */
        uri_decode_bin = gst_element_factory_make("uridecodebin", "uri-decode-bin");
        /* We set the input uri to the source element */
        g_object_set(G_OBJECT(uri_decode_bin), "uri", uri, NULL);
        /* Connect to the "pad-added" signal of the decodebin which generates a
         * callback once a new pad for raw data has been created by the decodebin */
        g_signal_connect(G_OBJECT(uri_decode_bin), "pad-added", G_CALLBACK(cb_newpad), bin);
        g_signal_connect(G_OBJECT(uri_decode_bin), "child-added",
                         G_CALLBACK(decodebin_child_added), bin);
        gst_bin_add(GST_BIN(bin), uri_decode_bin);
        /* We need to create a ghost pad for the source bin which will act as a proxy
         * for the video decoder src pad. The ghost pad will not have a target right
         * now. Once the decode bin creates the video decoder and generates the
         * cb_newpad callback, we will set the ghost pad target to the video decoder
         * src pad. */
        if (!gst_element_add_pad(bin, gst_ghost_pad_new_no_target("src",
                                                                  GST_PAD_SRC)))
        {
            g_printerr("Failed to add ghost pad in source bin\n");
            return NULL;
        }
        return bin;
    }
    int main(int argc, char *argv[])
    {
        GMainLoop *loop = NULL;
        GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL,
                   *queue1, *queue2, *queue3, *queue4, *queue5, *nvvidconv = NULL, *qtmux = NULL, *videoconvert = NULL,
                   *nvosd = NULL, *tiler = NULL, *sgie = NULL, *queue_sgie = NULL, *nvvidconv1 = NULL,
                   *h264parser = NULL, *decoder = NULL;
        GstElement *filter1 = NULL, *filter2 = NULL, *filter3 = NULL, *filter4 = NULL, *x264enc = NULL, *converter = NULL;
        GstCaps *caps1 = NULL, *caps2 = NULL, *caps3 = NULL, *caps4 = NULL;
    #ifdef PLATFORM_TEGRA
        GstElement *transform = NULL;
    #endif
        GstBus *bus = NULL;
        guint bus_watch_id;
        GstPad *pgie_src_pad = NULL, *sgie_src_pad = NULL;
        guint i, num_sources;
        guint tiler_rows, tiler_columns;
        guint pgie_batch_size;
        /* Check input arguments */
        if (argc < 2)
        {
            /* "<uri1>" was stripped by the blog's formatting and is restored here */
            g_printerr("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]);
            return -1;
        }
        num_sources = argc - 1;
        /* Standard GStreamer initialization */
        gst_init(&argc, &argv);
        loop = g_main_loop_new(NULL, FALSE);
        /* Create gstreamer elements */
        /* Create Pipeline element that will form a connection of other elements */
        pipeline = gst_pipeline_new("alpr-pipeline");
        /* Create nvstreammux instance to form batches from one or more sources. */
        streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
        queue_sgie = gst_element_factory_make("queue", "queue_sgie");
        if (!pipeline || !streammux)
        {
            g_printerr("One element could not be created. Exiting.\n");
            return -1;
        }
        gst_bin_add(GST_BIN(pipeline), streammux);
        for (i = 0; i < num_sources; i++)
        {
            GstPad *sinkpad, *srcpad;
            gchar pad_name[16] = {};
            GstElement *source_bin = create_source_bin(i, argv[i + 1]);
            if (!source_bin)
            {
                g_printerr("Failed to create source bin. Exiting.\n");
                return -1;
            }
            gst_bin_add(GST_BIN(pipeline), source_bin);
            g_snprintf(pad_name, 15, "sink_%u", i);
            sinkpad = gst_element_get_request_pad(streammux, pad_name);
            if (!sinkpad)
            {
                g_printerr("Streammux request sink pad failed. Exiting.\n");
                return -1;
            }
            srcpad = gst_element_get_static_pad(source_bin, "src");
            if (!srcpad)
            {
                g_printerr("Failed to get src pad of source bin. Exiting.\n");
                return -1;
            }
            if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK)
            {
                g_printerr("Failed to link source bin to stream muxer. Exiting.\n");
                return -1;
            }
            gst_object_unref(srcpad);
            gst_object_unref(sinkpad);
        }
        /* h264parse/nvv4l2decoder are created here but never added to the pipeline;
         * they are leftovers from a raw-h264 file-source variant of this app. */
        h264parser = gst_element_factory_make("h264parse", "h264-parser");
        decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
        /* Use nvinfer to infer on batched frame. */
        pgie = gst_element_factory_make("nvinfer", "primary-nvinference-engine");
        sgie = gst_element_factory_make("nvinfer", "secondary-nvinference-engine");
        /* Add queue elements between every two elements */
        queue1 = gst_element_factory_make("queue", "queue1");
        queue2 = gst_element_factory_make("queue", "queue2");
        queue3 = gst_element_factory_make("queue", "queue3");
        queue4 = gst_element_factory_make("queue", "queue4");
        queue5 = gst_element_factory_make("queue", "queue5");
        filter1 = gst_element_factory_make("capsfilter", "filter1");
        filter2 = gst_element_factory_make("capsfilter", "filter2");
        filter3 = gst_element_factory_make("capsfilter", "filter3");
        filter4 = gst_element_factory_make("capsfilter", "filter4");
        x264enc = gst_element_factory_make("x264enc", "h264 encoder");
        converter = gst_element_factory_make("videoconvert", "converter");
        /* videoconvert is created but never linked; note the duplicate element name */
        videoconvert = gst_element_factory_make("videoconvert", "converter");
        qtmux = gst_element_factory_make("qtmux", "muxer");
        nvvidconv1 = gst_element_factory_make("nvvideoconvert", "nvvideo-converter1");
        /* Use nvtiler to composite the batched frames into a 2D tiled array based
         * on the source of the frames. */
        tiler = gst_element_factory_make("nvmultistreamtiler", "nvtiler");
        /* Use convertor to convert from NV12 to RGBA as required by nvosd */
        nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
        /* Create OSD to draw on the converted RGBA buffer */
        nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
        sink = gst_element_factory_make("filesink", "nvvideo-renderer");
        if (!pgie || !tiler || !nvvidconv || !nvosd || !sink)
        {
            g_printerr("One element could not be created. Exiting.\n");
            return -1;
        }
        if (!converter || !x264enc || !qtmux || !filter3 || !filter4)
        {
            g_printerr("One element could not be created. Exiting.\n");
            return -1;
        }
    #ifdef PLATFORM_TEGRA
        transform = gst_element_factory_make("queue", "nvegl-transform");
        if (!transform)
        {
            g_printerr("One tegra element could not be created. Exiting.\n");
            return -1;
        }
    #endif
        // #ifndef PLATFORM_TEGRA
        // /* Set properties of the nvvideoconvert element
        //  * requires unified cuda memory for opencv blurring on CPU
        //  */
        // g_object_set(G_OBJECT(nvvidconv), "nvbuf-memory-type", 0, NULL);
        // #else
        // g_object_set(G_OBJECT(nvvidconv), "nvbuf-memory-type", 4, NULL);
        // #endif
        g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);
        g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
                     MUXER_OUTPUT_HEIGHT,
                     "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
        /* Configure the nvinfer elements using the config files; output-tensor-meta
         * makes nvinfer attach the raw output tensors so the probes can parse them. */
        g_object_set(G_OBJECT(pgie),
                     "config-file-path", PGIE_CONFIG_FILE, "output-tensor-meta", TRUE, NULL);
        g_object_set(G_OBJECT(sgie), "config-file-path", SGIE_CONFIG_FILE, "output-tensor-meta", TRUE, NULL);
        /* Override the batch-size set in the config file with the number of sources. */
        g_object_get(G_OBJECT(pgie), "batch-size", &pgie_batch_size, NULL);
        if (pgie_batch_size != num_sources)
        {
            g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
                       pgie_batch_size, num_sources);
            g_object_set(G_OBJECT(pgie), "batch-size", num_sources, NULL);
        }
        tiler_rows = (guint)sqrt(num_sources);
        tiler_columns = (guint)ceil(1.0 * num_sources / tiler_rows);
        /* we set the tiler properties here */
        g_object_set(G_OBJECT(tiler), "rows", tiler_rows, "columns", tiler_columns,
                     "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
        g_object_set(G_OBJECT(nvosd), "process-mode", OSD_PROCESS_MODE,
                     "display-text", OSD_DISPLAY_TEXT, NULL);
        g_object_set(G_OBJECT(sink), "sync", FALSE, NULL);
        g_object_set(G_OBJECT(sink), "location", OUTPUT_FILE, NULL);
        /* we add a message handler */
        bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
        bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
        gst_object_unref(bus);
        /* Set up the pipeline: we add all elements into the pipeline */
    #ifdef PLATFORM_TEGRA
        gst_bin_add_many(GST_BIN(pipeline), queue1, pgie, queue_sgie, sgie, queue2, tiler, queue3,
                         filter1, nvvidconv, filter2, nvosd, nvvidconv1, filter3, converter, filter4,
                         x264enc, qtmux, transform, sink, NULL);
        /* we link the elements together */
        if (!gst_element_link_many(streammux, queue1, pgie, queue_sgie, sgie, queue2, tiler, queue3,
                                   filter1, nvvidconv, filter2, nvosd, nvvidconv1, filter3, converter, filter4,
                                   x264enc, qtmux, transform, sink, NULL))
        {
            g_printerr("Elements could not be linked. Exiting.\n");
            return -1;
        }
    #else
        gst_bin_add_many(GST_BIN(pipeline), queue1, pgie, queue_sgie, sgie, queue2, tiler, queue3,
                         filter1, nvvidconv, filter2, nvosd, nvvidconv1, filter3, converter, filter4,
                         x264enc, qtmux, sink, NULL);
        /* we link the elements together */
        if (!gst_element_link_many(streammux, queue1, pgie, queue_sgie, sgie, queue2, tiler, queue3,
                                   filter1, nvvidconv, filter2, nvosd, nvvidconv1, filter3, converter, filter4,
                                   x264enc, qtmux, sink, NULL))
        {
            g_printerr("Elements could not be linked. Exiting.\n");
            return -1;
        }
    #endif
        caps1 = gst_caps_from_string("video/x-raw(memory:NVMM), format=NV12");
        g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
        gst_caps_unref(caps1);
        caps2 = gst_caps_from_string("video/x-raw(memory:NVMM), format=RGBA");
        g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
        gst_caps_unref(caps2);
        caps3 = gst_caps_from_string("video/x-raw, format=RGBA");
        g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
        gst_caps_unref(caps3);
        caps4 = gst_caps_from_string("video/x-raw, format=NV12");
        g_object_set(G_OBJECT(filter4), "caps", caps4, NULL);
        gst_caps_unref(caps4);
        /* Add probes to be informed of the generated metadata. They are attached to
         * the src pads of the PGIE and SGIE, where the inference output is available. */
        pgie_src_pad = gst_element_get_static_pad(pgie, "src");
        gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
                          pgie_pad_buffer_probe, NULL, NULL);
        gst_object_unref(pgie_src_pad);
        sgie_src_pad = gst_element_get_static_pad(sgie, "src");
        gst_pad_add_probe(sgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
                          sgie_pad_buffer_probe, (gpointer)sink, NULL);
        gst_object_unref(sgie_src_pad);
        /* Set the pipeline to "playing" state */
        GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline");
        g_print("Now playing:");
        for (i = 0; i < num_sources; i++)
        {
            g_print(" %s", argv[i + 1]);
        }
        g_print("\n");
        gst_element_set_state(pipeline, GST_STATE_PLAYING);
        /* Wait till pipeline encounters an error or EOS */
        g_print("Running...\n");
        g_main_loop_run(loop);
        /* Out of the main loop, clean up nicely */
        g_print("Returned, stopping playback\n");
        gst_element_set_state(pipeline, GST_STATE_NULL);
        g_print("Deleting pipeline\n");
        gst_object_unref(GST_OBJECT(pipeline));
        g_source_remove(bus_watch_id);
        g_main_loop_unref(loop);
        return 0;
    }
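
    Two notes on the listing above. First, strDecode implements greedy CTC decoding: with the alphabet "-0123456789AB...", a prediction sequence {1, 1, 0, 2} yields the raw string "00-1", while the collapsed form drops the blank (index 0) and repeated characters and yields "01". Second, getRectPts/find_T_matrix (helpers from the WPOD project sources) build the 3x3 homography mapping the four detected plate corners onto the upright out_size rectangle; the same matrix can also be computed with stock OpenCV. A hedged sketch of that alternative (the corner order top-left, top-right, bottom-right, bottom-left is an assumption about the layout of the WPOD pts arrays):

    /* Sketch: equivalent homography via OpenCV instead of find_T_matrix.
     * ptsh holds the scaled corner coordinates exactly as in pgie_pad_buffer_probe. */
    std::vector<cv::Point2f> src_pts, dst_pts;
    for (int j = 0; j < 4; j++)
        src_pts.push_back(cv::Point2f(ptsh[0][j], ptsh[1][j]));
    dst_pts.push_back(cv::Point2f(0, 0));                            /* top-left */
    dst_pts.push_back(cv::Point2f(out_size.width, 0));               /* top-right */
    dst_pts.push_back(cv::Point2f(out_size.width, out_size.height)); /* bottom-right */
    dst_pts.push_back(cv::Point2f(0, out_size.height));              /* bottom-left */
    cv::Mat H1 = cv::getPerspectiveTransform(src_pts, dst_pts);      /* 3x3, CV_64F */
    cv::Mat tmp_img;
    cv::warpPerspective(in_mat, tmp_img, H1, out_size);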

    sgie: getting the frame data and converting it to an OpenCV Mat:

    /* NOTE: the template argument of `boxes` was stripped by the blog's formatting;
     * cv::Rect is a plausible guess for a list of product boxes. */
    void Xstreamer::get_product_size(GstBuffer *buf, gint batch_id, gint source_id, std::vector<cv::Rect> &boxes)
    {
        if (boxes.size() == 0)
        {
            return;
        }
        NvDsMetaList *l_frame = NULL;
        NvDsMetaList *l_user_meta = NULL;
        /* Get original raw data */
        GstMapInfo in_map_info;
        memset(&in_map_info, 0, sizeof(in_map_info));
        if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ))
        {
            g_print("Error: Failed to map gst buffer\n");
        }
        NvBufSurface *surface = (NvBufSurface *)in_map_info.data;
        char *src_data = NULL;
        // cout << "surfaceList[batch_id].dataSize: " << surface->surfaceList[batch_id].dataSize << endl;
        src_data = (char *)malloc(surface->surfaceList[batch_id].dataSize);
        if (src_data == NULL)
        {
            g_print("Error: failed to malloc src_data \n");
        }
        /* Map all surfaces/planes of the batch for CPU access */
        NvBufSurfaceMap(surface, -1, -1, NVBUF_MAP_READ_WRITE);
        NvBufSurfacePlaneParams *pParams = &surface->surfaceList[batch_id].planeParams;
        // cout << "planeParams: " << pParams->width[0] << " " << pParams->height[0] << " " << pParams->bytesPerPix[0] << endl;
        gint frame_width = (gint)surface->surfaceList[batch_id].width;
        gint frame_height = (gint)surface->surfaceList[batch_id].height;
        gint frame_step = surface->surfaceList[batch_id].pitch;
        uint32_t current_frame_width = surface->surfaceList[batch_id].width;
        uint32_t current_frame_height = surface->surfaceList[batch_id].height;
        /* Wrap the mapped RGBA data in a cv::Mat, then take a deep copy */
        cv::Mat current_frame_data = cv::Mat((gint)current_frame_height,
                                             (gint)current_frame_width,
                                             CV_8UC4,
                                             surface->surfaceList[batch_id].mappedAddr.addr[0],
                                             surface->surfaceList[batch_id].pitch);
        cv::Mat image_data((gint)current_frame_height,
                           (gint)current_frame_width,
                           CV_8UC4);
        current_frame_data.copyTo(image_data);
        /* RGBA convert BGR */
        cv::Mat in_mat = cv::Mat((gint)current_frame_height,
                                 (gint)current_frame_width,
                                 CV_8UC3);
        cv::cvtColor(image_data, in_mat, cv::COLOR_RGBA2BGR); // opencv4
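
    The excerpt ends here in the original post. For completeness, a hedged sketch of the cleanup such a function would still need, pairing each release with the map/malloc calls made above:

    // Sketch of the missing tail (assumption: boxes holds cv::Rect crops as guessed above).
    // in_mat now holds a BGR copy of the frame and can be cropped/saved, e.g.:
    // cv::imwrite("product.jpg", in_mat(boxes[0]));
    NvBufSurfaceUnMap(surface, -1, -1);  // pairs with NvBufSurfaceMap(surface, -1, -1, ...)
    free(src_data);                      // src_data is malloc'ed but unused in the excerpt
    gst_buffer_unmap(buf, &in_map_info); // pairs with gst_buffer_map
    }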

  • Original source: https://blog.csdn.net/jacke121/article/details/126024941