DeepStream SDK documentation: NVIDIA DeepStream SDK Developer Guide — DeepStream 6.1.1 Release documentation
Gst-nvinfer documentation: Gst-nvinfer — DeepStream 6.1.1 Release documentation
nvinfer is the plugin used to run inference; its inputs and outputs are described in the Gst-nvinfer plugin documentation. The plugin's source code is open, so let's walk through it.
When the pipeline starts, gst_nvinfer_start is entered:
gst_nvinfer_start{
  status = createNvDsInferContext (&infer_context, *init_params, nvinfer, gst_nvinfer_logger);
  // Creates the inference context, a wrapper around the actual inference; it ends up in the plugin member m_InferCtx.
  nvinfer->output_thread =
      g_thread_new ("nvinfer-output-thread", gst_nvinfer_output_loop, nvinfer); // Post-processing, attaches meta.
  if (!nvinfer->input_tensor_from_meta) {
    nvinfer->input_queue_thread =
        g_thread_new ("nvinfer-input-queue-thread", gst_nvinfer_input_queue_loop,
            nvinfer); // Pre-processing and inference.
    ......
}
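These threads hand batches to each other through GQueues guarded by a mutex/condition pair. Below is a minimal, generic sketch of that start/loop/stop pattern; it is not the actual nvinfer code, and all names in it are made up for illustration.

#include <glib.h>

/* Hypothetical worker used only to illustrate the start/loop/stop + queue pattern. */
typedef struct {
  GThread  *thread;
  GMutex    lock;
  GCond     cond;
  GQueue   *queue;   /* plays the role of input_queue / process_queue */
  gboolean  stop;
} Worker;

static gpointer
worker_loop (gpointer data)
{
  Worker *w = (Worker *) data;
  g_mutex_lock (&w->lock);
  while (!w->stop) {
    if (g_queue_is_empty (w->queue)) {
      g_cond_wait (&w->cond, &w->lock);   /* sleep until a batch arrives or stop is set */
      continue;
    }
    gpointer batch = g_queue_pop_head (w->queue);
    g_mutex_unlock (&w->lock);
    /* ... process the batch (preprocess + infer, or postprocess + attach meta) ... */
    (void) batch;
    g_mutex_lock (&w->lock);
  }
  g_mutex_unlock (&w->lock);
  return NULL;
}

static void
worker_start (Worker *w)              /* analogous to gst_nvinfer_start */
{
  g_mutex_init (&w->lock);
  g_cond_init (&w->cond);
  w->queue = g_queue_new ();
  w->stop = FALSE;
  w->thread = g_thread_new ("nvinfer-like-worker", worker_loop, w);
}

static void
worker_stop (Worker *w)               /* analogous to gst_nvinfer_stop */
{
  g_mutex_lock (&w->lock);
  w->stop = TRUE;
  g_cond_broadcast (&w->cond);        /* wake the worker so it can observe stop */
  g_mutex_unlock (&w->lock);
  g_thread_join (w->thread);
}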
Thread 1: the main streaming thread
Buffers coming from the upstream element first enter gst_nvinfer_submit_input_buffer:
gst_nvinfer_submit_input_buffer{
  ......
  if (nvinfer->input_tensor_from_meta) {
    flow_ret = gst_nvinfer_process_tensor_input (nvinfer, inbuf, in_surf);
  } else if (nvinfer->process_full_frame) {
    flow_ret = gst_nvinfer_process_full_frame (nvinfer, inbuf, in_surf);
  } else {
    flow_ret = gst_nvinfer_process_objects (nvinfer, inbuf, in_surf);
  }
  ......
}
As shown, a different processing function is chosen depending on the configuration.
If nvinfer->input_tensor_from_meta is 1, gst_nvinfer_process_tensor_input is entered: nvinfer takes the already-preprocessed tensors straight from the meta, and after tensors.push_back (tensor) all tensor data is collected in a std::vector.
if (i == frames.size () - 1 || batch->frames.size () == nvinfer->max_batch_size) {......
  NvDsInferContextBatchPreprocessedInput input_batch;
  DS_NVINFER_IMPL (nvinfer)->m_InferCtx->queueInputBatchPreprocessed (input_batch);
  // queueInputBatchPreprocessed does no pre-processing; it goes straight to inference
  // (m_BackendContext->enqueueBuffer; enqueueBuffer is not open source).
}
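The tensor-from-meta path assumes an upstream element, typically nvdspreprocess, has already attached the preprocessed tensors as metadata. A rough pipeline sketch of how that is wired up, assuming the input-tensor-meta property documented for Gst-nvinfer; the URI, config paths and mux dimensions are placeholders:

#include <gst/gst.h>

int main (int argc, char *argv[])
{
  gst_init (&argc, &argv);
  GError *err = NULL;

  /* input-tensor-meta=true makes nvinfer consume the tensors attached by nvdspreprocess
   * instead of doing its own pre-processing. */
  GstElement *pipeline = gst_parse_launch (
      "uridecodebin uri=file:///path/to/video.mp4 ! m.sink_0 "
      "nvstreammux name=m batch-size=1 width=1280 height=720 ! "
      "nvdspreprocess config-file=config_preprocess.txt ! "
      "nvinfer config-file-path=config_infer_primary.txt input-tensor-meta=true ! "
      "fakesink", &err);
  if (!pipeline) {
    g_printerr ("Failed to create pipeline: %s\n", err->message);
    return -1;
  }
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  /* ... run a main loop, handle EOS/errors, then clean up ... */
  return 0;
}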
If nvinfer->process_full_frame is 1, gst_nvinfer_process_full_frame is entered. Its main job is to scale and pad the source frame to the size the model expects:
gst_nvinfer_process_full_frame{......
  gst_buffer_pool_acquire_buffer (nvinfer->pool, &conv_gst_buf, nullptr); // Acquire a temporary GstBuffer.
  batch->conv_buf = conv_gst_buf;
  get_converted_buffer(... // Only computes the scale/pad parameters here; this is not the actual conversion yet.
  /* Submit batch if the batch size has reached max_batch_size. */
  if (batch->frames.size () == nvinfer->max_batch_size) {
    if (!convert_batch_and_push_to_input_thread (nvinfer, batch.get(), memory)) ...... // Converts the accumulated batch (up to max-batch-size); this calls the actual conversion function, NvBufSurfTransformAsync.
    g_queue_push_tail (nvinfer->input_queue, batch); // The converted data is pushed onto the queue; the gst_nvinfer_input_queue_loop thread picks it up.
}
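When the aspect ratio is preserved (the maintain-aspect-ratio setting), scaling to the network resolution leaves padding on the unused side. An illustrative computation of the destination rectangle that get_converted_buffer has to work out before the NvBufSurfTransform conversion; this is not the plugin's actual code:

/* Fit a src_w x src_h frame inside a net_w x net_h network input while
 * preserving aspect ratio; the remaining area becomes padding. */
static void
compute_scaled_dims (unsigned src_w, unsigned src_h,
                     unsigned net_w, unsigned net_h,
                     unsigned *dst_w, unsigned *dst_h)
{
  double ratio_w = (double) net_w / src_w;
  double ratio_h = (double) net_h / src_h;
  double ratio = ratio_w < ratio_h ? ratio_w : ratio_h;   /* fit inside */

  *dst_w = (unsigned) (src_w * ratio);
  *dst_h = (unsigned) (src_h * ratio);
  /* e.g. 1920x1080 into a 640x640 network -> 640x360, leaving 280 padded rows */
}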
If nvinfer->input_tensor_from_meta == 0 && nvinfer->process_full_frame == 0, gst_nvinfer_process_objects is entered. This is the SGIE case, where the objects detected in the source frame are processed:
gst_nvinfer_process_objects{......
  if (nvinfer->classifier_async_mode && object_meta->object_id == UNTRACKED_OBJECT_ID) {
    // Error case: asynchronous classification only works together with a tracker.
  }
  source_info->object_history_map.find (object_meta->object_id); // Look up the object's inference history.
  bool needs_infer = should_infer_object (...... // Decide whether this object needs to be inferred again; e.g. if it is not meant for this model, or it has already been classified and its size has barely changed, no re-inference is needed.
  if (!needs_infer) {
    continue;
  }
  if (obj_history && nvinfer->classifier_async_mode) {
    attach_metadata_classifier(...... // Async mode with existing history: attach the cached classification meta directly.
  }
  batch.reset (new GstNvInferBatch); // Prepare a batch for inference; the code below is similar to gst_nvinfer_process_full_frame.
}
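The kinds of checks should_infer_object makes can be paraphrased roughly as follows. This is illustrative pseudologic only, not the actual function; the struct, parameter names and thresholds are hypothetical, only the ideas come from the comments above:

#include <glib.h>

/* Hypothetical history entry for one tracked object. */
typedef struct {
  gboolean has_history;        /* already classified before            */
  guint    last_infer_width;   /* object size at the last inference    */
  guint    last_infer_height;
} ObjHistory;

static gboolean
needs_infer_again (guint parent_gie_id, guint operate_on_gie_id,
                   guint obj_w, guint obj_h,
                   guint min_w, guint min_h,
                   const ObjHistory *hist)
{
  /* Object was produced by a detector this SGIE is not configured to operate on -> skip. */
  if (parent_gie_id != operate_on_gie_id)
    return FALSE;

  /* Object too small for the model -> skip. */
  if (obj_w < min_w || obj_h < min_h)
    return FALSE;

  /* Already classified and the size has barely changed -> reuse the cached result. */
  if (hist && hist->has_history &&
      ABS ((gint) obj_w - (gint) hist->last_infer_width)  < 16 &&
      ABS ((gint) obj_h - (gint) hist->last_infer_height) < 16)
    return FALSE;

  return TRUE;
}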
Thread 2: gst_nvinfer_input_queue_loop
If nvinfer->input_tensor_from_meta == 1, this thread is not started. Its main job is inference:
gst_nvinfer_input_queue_loop{......
  while (nvinfer->stop == FALSE) {
    NvDsInferContextBatchInput input_batch; // Argument for the inference call queueInputBatch.
    batch = (GstNvInferBatch *) g_queue_pop_head (nvinfer->input_queue); // Pop the data to be inferred.
    /* Form the vector of input frame pointers. */
    for (i = 0; i < batch->frames.size (); i++) {
      input_frames.push_back (batch->frames[i].converted_frame_ptr); // Collect the converted frame pointers.
    }
    input_batch.inputFrames = input_frames.data ();
    input_batch.numInputFrames = input_frames.size ();
    input_batch.inputFormat = NvDsInferFormat_RGBA;
    status = nvdsinfer_ctx->queueInputBatch (input_batch); // Pre-processes first (m_Preprocessor->transform), then runs inference (m_BackendContext->enqueueBuffer), then copies the inference output (m_Postprocessor->copyBuffersToHostMemory).
    g_queue_push_tail (nvinfer->process_queue, batch);
  }
}
Thread 3: gst_nvinfer_output_loop
This function mainly runs post-processing and attaches the result meta:
gst_nvinfer_output_loop{
  while (!nvinfer->stop) {......
    /* Pop a batch from the element's process queue. */
    batch.reset ((GstNvInferBatch *) g_queue_pop_head (nvinfer->process_queue));
    /* Need to only push buffer to downstream element. This batch was not
     * actually submitted for inferencing. */
    if (batch->push_buffer) { // Only needs to be pushed downstream.
      gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (nvinfer), batch->inbuf);
    }
    /* Dequeue inferencing output from NvDsInferContext */
    status = nvdsinfer_ctx->dequeueOutputBatch (*batch_output); // Dequeue the inference output and post-process it, e.g. clustering (m_Postprocessor->postProcessHost).
  }
}
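Once gst_nvinfer_output_loop has attached the result meta and pushed the buffer downstream, an application can read it from any downstream pad, for example with a pad probe. A minimal sketch using the standard NvDsBatchMeta accessors; the probe placement and the printed fields are illustrative:

#include <gst/gst.h>
#include "gstnvdsmeta.h"

/* Pad probe attached to the src pad of nvinfer (or any downstream element):
 * walks the batch meta that gst_nvinfer_output_loop attached. */
static GstPadProbeReturn
infer_src_pad_probe (GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
  GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER (info);
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
  if (!batch_meta)
    return GST_PAD_PROBE_OK;

  for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame; l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj; l_obj = l_obj->next) {
      NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
      g_print ("frame %d: class %d conf %.2f\n",
          (int) frame_meta->frame_num, obj->class_id, obj->confidence);
    }
  }
  return GST_PAD_PROBE_OK;
}

The probe is registered with gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER, infer_src_pad_probe, NULL, NULL) on a pad downstream of nvinfer.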
Explanation of a few settings
raw-output-file-write: if true, the raw inference output is written to the current directory by nvinfer's gst_nvinfer_output_generated_file_write:
/* Write layer contents to file if enabled. */
if (nvinfer->write_raw_buffers_to_file) {
  gst_nvinfer_output_generated_file_write (batch->inbuf,
      &nvinfer->network_info,
      nvinfer->layers_info->data (),
      nvinfer->layers_info->size (), batch->frames.size (), nvinfer);
}
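For example, it can be enabled from application code with the property name shown above; pgie here is assumed to be an already created nvinfer element:

/* Dump raw output layer buffers to files in the current directory. */
g_object_set (G_OBJECT (pgie), "raw-output-file-write", TRUE, NULL);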
infer-raw-output-dir: setting this is equivalent to setting config->raw_output_directory; create_primary_gie_bin and create_secondary_gie then register the write callback, and raw_output_directory is used inside that callback, write_infer_output_to_file.
As follows:
if (config->raw_output_directory) {
  g_object_set (G_OBJECT (bin->primary_gie),
      "raw-output-generated-callback", out_callback,
      "raw-output-generated-userdata", config,
      NULL);
}
if (nvinfer->output_generated_callback) { // This is the out_callback set via raw-output-generated-callback, i.e. write_infer_output_to_file.
  nvinfer->output_generated_callback (batch->inbuf,
      &nvinfer->network_info,
      nvinfer->layers_info->data (),
      nvinfer->layers_info->size (),
      batch->frames.size (), nvinfer->output_generated_userdata);
}
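An application can plug in its own callback the same way deepstream-app does. A sketch, using the argument order visible in the call above; the callback body just reports the layer count and error handling is omitted, and the header name is taken from the DeepStream sources:

#include <gst/gst.h>
#include "gstnvdsinfer.h"   /* NvDsInferNetworkInfo / NvDsInferLayerInfo */

/* Matches the arguments nvinfer passes in gst_nvinfer_output_loop above. */
static void
my_raw_output_cb (GstBuffer *buf, NvDsInferNetworkInfo *network_info,
    NvDsInferLayerInfo *layers_info, guint num_layers,
    guint batch_size, gpointer user_data)
{
  g_print ("raw output: %u layers for a batch of %u (network %ux%u)\n",
      num_layers, batch_size, network_info->width, network_info->height);
  /* layers_info[i].buffer points at the host copy of each output layer. */
}

......
g_object_set (G_OBJECT (pgie),
    "raw-output-generated-callback", my_raw_output_cb,
    "raw-output-generated-userdata", NULL,
    NULL);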