ffmpeg v4l2 integration analysis


    First, let's pin down the entry point.
    FFmpeg integrates v4l2, so where do we look for the v4l2-related code?
    If you have read **ffmpeg-codec函数调用流程分析** (the analysis of FFmpeg's codec call flow), you will know that FFmpeg integrates a codec plugin through just a few simple callbacks. With that in mind, let's look at v4l2:

    static const AVOption options[] = {
        V4L_M2M_DEFAULT_OPTS,
        { "num_capture_buffers", "Number of buffers in the capture context",
            OFFSET(num_capture_buffers), AV_OPT_TYPE_INT, {.i64 = 20}, 20, INT_MAX, FLAGS },
        { NULL},
    };
    
    #define M2MDEC_CLASS(NAME) \
        static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
            .class_name = #NAME "_v4l2m2m_decoder", \
            .item_name  = av_default_item_name, \
            .option     = options, \
            .version    = LIBAVUTIL_VERSION_INT, \
        };
    
    #define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
        M2MDEC_CLASS(NAME) \
        const AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
            .name           = #NAME "_v4l2m2m" , \
            .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"), \
            .type           = AVMEDIA_TYPE_VIDEO, \
            .id             = CODEC , \
            .priv_data_size = sizeof(V4L2m2mPriv), \
            .priv_class     = &v4l2_m2m_ ## NAME ## _dec_class, \
            .init           = v4l2_decode_init, \
            .receive_frame  = v4l2_receive_frame, \
            .close          = v4l2_decode_close, \
            .bsfs           = bsf_name, \
            .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
            .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP, \
            .wrapper_name   = "v4l2m2m", \
        }
    
    M2MDEC(h264,  "H.264", AV_CODEC_ID_H264,       "h264_mp4toannexb");
    M2MDEC(hevc,  "HEVC",  AV_CODEC_ID_HEVC,       "hevc_mp4toannexb");
    M2MDEC(mpeg1, "MPEG1", AV_CODEC_ID_MPEG1VIDEO, NULL);
    M2MDEC(mpeg2, "MPEG2", AV_CODEC_ID_MPEG2VIDEO, NULL);
    M2MDEC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4,      NULL);
    M2MDEC(h263,  "H.263", AV_CODEC_ID_H263,       NULL);
    M2MDEC(vc1 ,  "VC1",   AV_CODEC_ID_VC1,        NULL);
    M2MDEC(vp8,   "VP8",   AV_CODEC_ID_VP8,        NULL);
    M2MDEC(vp9,   "VP9",   AV_CODEC_ID_VP9,        NULL);
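
    For example, M2MDEC(h264, "H.264", AV_CODEC_ID_H264, "h264_mp4toannexb") expands, by substituting into the two macros above, to roughly:

    static const AVClass v4l2_m2m_h264_dec_class = {
        .class_name = "h264_v4l2m2m_decoder",
        .item_name  = av_default_item_name,
        .option     = options,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    const AVCodec ff_h264_v4l2m2m_decoder = {
        .name           = "h264_v4l2m2m",
        .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem H.264 decoder wrapper"),
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = AV_CODEC_ID_H264,
        .priv_data_size = sizeof(V4L2m2mPriv),
        .priv_class     = &v4l2_m2m_h264_dec_class,
        .init           = v4l2_decode_init,
        .receive_frame  = v4l2_receive_frame,
        .close          = v4l2_decode_close,
        .bsfs           = "h264_mp4toannexb",
        .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING,
        .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP,
        .wrapper_name   = "v4l2m2m",
    };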
    
    

    The core is just these three functions:

    .init           = v4l2_decode_init,
    .receive_frame  = v4l2_receive_frame,
    .close          = v4l2_decode_close,
    

    So everything starts from these three functions (provided you know where they are called from).
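
    To see where these callbacks fire from, here is a minimal decode loop (a sketch against the public FFmpeg API with error handling trimmed, not code from the FFmpeg tree): avcodec_open2() ends up in v4l2_decode_init(), every avcodec_receive_frame() drives v4l2_receive_frame(), and avcodec_free_context() reaches v4l2_decode_close(). The num_capture_buffers option defined above can be passed via the options dictionary:

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    /* sketch: drive the h264_v4l2m2m decoder through the public API;
     * in real code the packets would come from a demuxer */
    static int decode_with_v4l2m2m(AVPacket *pkt /* one coded packet, or NULL to flush */)
    {
        const AVCodec *codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
        AVCodecContext *avctx = avcodec_alloc_context3(codec); /* allocates priv_data (V4L2m2mPriv) */
        AVFrame *frame = av_frame_alloc();
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "num_capture_buffers", "24", 0); /* the AVOption from the table above */
        avcodec_open2(avctx, codec, &opts);                 /* -> v4l2_decode_init() */

        avcodec_send_packet(avctx, pkt);
        while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) { /* -> v4l2_receive_frame() */
            /* consume frame->data[] here */
            av_frame_unref(frame); /* hands the buffer back to the driver (see v4l2_free_buffer below) */
        }

        av_frame_free(&frame);
        av_dict_free(&opts);
        avcodec_free_context(&avctx); /* -> v4l2_decode_close() */
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }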

    Before analyzing the functions, we need to look at three important structs.
    V4L2Buffer wraps v4l2_buffer and acts as the intermediary between AVFrame/AVPacket and v4l2_buffer.

    enum V4L2Buffer_status {
        V4L2BUF_AVAILABLE,
        V4L2BUF_IN_DRIVER,
        V4L2BUF_RET_USER,
    };
    
    /**
     * V4L2Buffer (wrapper for v4l2_buffer management)
     */
    typedef struct V4L2Buffer {
        /* each buffer needs to have a reference to its context */
        struct V4L2Context *context;
    
        /* This object is refcounted per-plane, so we need to keep track
         * of how many context-refs we are holding. */
        AVBufferRef *context_ref;
        atomic_uint context_refcount;
    
        /* keep track of the mmap address and mmap length */
        struct V4L2Plane_info {
            int bytesperline;
            void * mm_addr;
            size_t length;
        } plane_info[VIDEO_MAX_PLANES];
    
        int num_planes;
    
        /* the v4l2_buffer buf.m.planes pointer uses the planes[] mem */
        struct v4l2_buffer buf;
        struct v4l2_plane planes[VIDEO_MAX_PLANES];
    
        int flags;
        enum V4L2Buffer_status status;
    
    } V4L2Buffer;
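
    Before the function analysis, it helps to keep the lifecycle of the status field in mind; the summary below is distilled from the functions examined later in this article:

    /*
     * V4L2Buffer status lifecycle (simplified):
     *
     * V4L2BUF_AVAILABLE --ff_v4l2_buffer_enqueue() / VIDIOC_QBUF------> V4L2BUF_IN_DRIVER
     * V4L2BUF_IN_DRIVER --v4l2_dequeue_v4l2buf()  / VIDIOC_DQBUF------> V4L2BUF_AVAILABLE
     * V4L2BUF_AVAILABLE --v4l2_buf_increase_ref(), data sent to user--> V4L2BUF_RET_USER
     * V4L2BUF_RET_USER  --v4l2_free_buffer(), user unrefs the data;
     *                     re-queued if streaming ---------------------> V4L2BUF_IN_DRIVER
     */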
    

    V4L2Context: this struct represents one of v4l2's two ports (the output and capture queues).

    typedef struct V4L2Context {
        /**
         * context name.
         */
        const char* name;
    
        /**
         * Type of this buffer context.
         * See V4L2_BUF_TYPE_VIDEO_* in videodev2.h
         * Readonly after init.
         */
        enum v4l2_buf_type type;
    
        /**
         * AVPixelFormat corresponding to this buffer context.
         * AV_PIX_FMT_NONE means this is an encoded stream.
         */
        enum AVPixelFormat av_pix_fmt;
    
        /**
         * AVCodecID corresponding to this buffer context.
         * AV_CODEC_ID_RAWVIDEO means this is a raw stream and av_pix_fmt must be set to a valid value.
         */
        enum AVCodecID av_codec_id;
    
        /**
         * Format returned by the driver after initializing the buffer context.
         * Readonly after init.
         */
        struct v4l2_format format;
    
        /**
         * Width and height of the frames it produces (in case of a capture context, e.g. when decoding)
         * or accepts (in case of an output context, e.g. when encoding).
         */
        int width, height;
        AVRational sample_aspect_ratio;
    
        /**
         * Indexed array of V4L2Buffers
         */
        V4L2Buffer *buffers;
    
        /**
         * Readonly after init.
         */
        int num_buffers;
    
        /**
         * Whether the stream has been started (VIDIOC_STREAMON has been sent).
         */
        int streamon;
    
        /**
         *  Either no more buffers available or an unrecoverable error was notified
         *  by the V4L2 kernel driver: once set the context has to be exited.
         */
        int done;
    
    } V4L2Context;
    

    V4L2m2mContext is the management struct: it manages the whole v4l2 session, including the two ports mentioned above.

    typedef struct V4L2m2mContext {
        char devname[PATH_MAX];
        int fd;
    
        /* the codec context queues */
        //capture: decoded frames come out of the driver on this queue
        V4L2Context capture;
        //output: coded packets are fed to the driver on this queue
        V4L2Context output;
    
        /* dynamic stream reconfig */
        //the owning codec context
        AVCodecContext *avctx;
        sem_t refsync;
        atomic_uint refcount;
        int reinit;
    
        /* null frame/packet received */
        int draining;
        AVPacket buf_pkt;
    
        /* Reference to a frame. Only used during encoding */
        AVFrame *frame;
    
        /* Reference to self; only valid while codec is active. */
        AVBufferRef *self_ref;
    
        /* reference back to V4L2m2mPriv */
        void *priv;
    } V4L2m2mContext;
    
    //this struct is also the AVCodecContext's priv_data
    typedef struct V4L2m2mPriv {
        AVClass *class;
    
        V4L2m2mContext *context;
        AVBufferRef    *context_ref;
    
        int num_output_buffers;
        int num_capture_buffers;
    } V4L2m2mPriv;
    

    1. v4l2 initialization

    
    static av_cold int v4l2_decode_init(AVCodecContext *avctx)
    {
        V4L2Context *capture, *output;
        V4L2m2mContext *s;
        V4L2m2mPriv *priv = avctx->priv_data;
        int ret;
    //at bottom this function just creates a V4L2m2mContext; avctx->priv_data is the V4L2m2mPriv,
    //whose memory was allocated in avcodec_alloc_context3() with the size declared by the plugin
        ret = ff_v4l2_m2m_create_context(priv, &s);
        if (ret < 0)
            return ret;
    
        capture = &s->capture;
        output = &s->output;
    
        /* if these dimensions are invalid (ie, 0 or too small) an event will be raised
         * by the v4l2 driver; this event will trigger a full pipeline reconfig and
         * the proper values will be retrieved from the kernel driver.
         */
        output->height = capture->height = avctx->coded_height;
        output->width = capture->width = avctx->coded_width;
    
    //the output port carries the coded stream; its pixel format is decided by the decoder, so set AV_PIX_FMT_NONE here
        output->av_codec_id = avctx->codec_id;
        output->av_pix_fmt  = AV_PIX_FMT_NONE;
    //for the capture (raw frame) side we do know the pix_fmt
        capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
        capture->av_pix_fmt = avctx->pix_fmt;
    
        s->avctx = avctx;
        ret = ff_v4l2_m2m_codec_init(priv);
        if (ret) {
            av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
            return ret;
        }
    
        return v4l2_prepare_decoder(s);
    }
    

    The main thing to look at here is how ff_v4l2_m2m_create_context() creates and initializes the V4L2m2mContext.

    int ff_v4l2_m2m_create_context(V4L2m2mPriv *priv, V4L2m2mContext **s)
    {
    //allocate the V4L2m2mContext
        *s = av_mallocz(sizeof(V4L2m2mContext));
        if (!*s)
            return AVERROR(ENOMEM);
    //wrap the V4L2m2mContext in an AVBufferRef stored on context_ref
        priv->context_ref = av_buffer_create((uint8_t *) *s, sizeof(V4L2m2mContext),
                                             &v4l2_m2m_destroy_context, NULL, 0);
        if (!priv->context_ref) {
            av_freep(s);
            return AVERROR(ENOMEM);
        }
    
        /* assign the context */
        //here the V4L2m2mContext and V4L2m2mPriv point at each other
        priv->context = *s;
        (*s)->priv = priv;
    
        /* populate it */
        priv->context->capture.num_buffers = priv->num_capture_buffers;
        priv->context->output.num_buffers  = priv->num_output_buffers;
        //yet another reference to the same AVBufferRef
        priv->context->self_ref = priv->context_ref;
        priv->context->fd = -1;
    
        priv->context->frame = av_frame_alloc();
        if (!priv->context->frame) {
            av_buffer_unref(&priv->context_ref);
            *s = NULL; /* freed when unreferencing context_ref */
            return AVERROR(ENOMEM);
        }
    
        return 0;
    }
    

    At this point the picture is roughly clear: an AVBufferRef is created around the V4L2m2mContext, and that reference is held both by the V4L2m2mPriv and by the V4L2m2mContext itself (self_ref), with a refcount of 1 at this stage. So when does the refcount grow? Looking further, V4L2Buffer also carries an AVBufferRef, and inside v4l2_buf_increase_ref() we finally find av_buffer_ref(s->self_ref). In other words, as long as a V4L2Buffer exists and its data has been handed to the user layer, the V4L2m2mContext's reference count is incremented; only when the user layer unrefs the data, and v4l2_free_buffer() releases the v4l2 contents, does av_buffer_unref(&avbuf->context_ref) run. The key point: when the user layer holds data from a v4l2 buffer, the V4L2m2mContext refcount goes up by one; when the user layer releases that data, it goes back down by one.
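
    The same lifetime pattern can be reproduced in isolation. Below is a minimal, self-contained sketch (hypothetical names, not FFmpeg code) of a context that stays alive as long as any per-buffer reference taken via av_buffer_ref() is outstanding, mirroring self_ref/context_ref:

    #include <stdio.h>
    #include <libavutil/buffer.h>
    #include <libavutil/mem.h>

    /* free callback: runs only when the very last reference is gone */
    static void ctx_free(void *opaque, uint8_t *data)
    {
        printf("context really freed\n");
        av_free(data);
    }

    int main(void)
    {
        uint8_t *ctx = av_mallocz(64);
        /* like ff_v4l2_m2m_create_context(): wrap the context, refcount = 1 */
        AVBufferRef *self_ref = av_buffer_create(ctx, 64, ctx_free, NULL, 0);

        /* like v4l2_buf_increase_ref(): a buffer handed to the user takes a ref */
        AVBufferRef *held_by_user = av_buffer_ref(self_ref); /* refcount = 2 */

        av_buffer_unref(&self_ref);     /* owner lets go; the context survives */
        printf("user still holds the data\n");
        av_buffer_unref(&held_by_user); /* last ref: ctx_free() runs now */
        return 0;
    }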

    The code of ff_v4l2_m2m_codec_init() is as follows:

    int ff_v4l2_m2m_codec_init(V4L2m2mPriv *priv)
    {
        int ret = AVERROR(EINVAL);
        struct dirent *entry;
        DIR *dirp;
    
        V4L2m2mContext *s = priv->context;
    
        dirp = opendir("/dev");
        if (!dirp)
            return AVERROR(errno);
    //scan /dev/videoX for a usable device
        for (entry = readdir(dirp); entry; entry = readdir(dirp)) {
    
            if (strncmp(entry->d_name, "video", 5))
                continue;
    
            snprintf(s->devname, sizeof(s->devname), "/dev/%s", entry->d_name);
            av_log(s->avctx, AV_LOG_DEBUG, "probing device %s\n", s->devname);
            ret = v4l2_probe_driver(s);
            if (!ret)
                break;
        }
    
        closedir(dirp);
    
        if (ret) {
            av_log(s->avctx, AV_LOG_ERROR, "Could not find a valid device\n");
            memset(s->devname, 0, sizeof(s->devname));
    
            return ret;
        }
    
        av_log(s->avctx, AV_LOG_INFO, "Using device %s\n", s->devname);
    
        return v4l2_configure_contexts(s);
    }
    
    
    
    static int v4l2_configure_contexts(V4L2m2mContext *s)
    {
        void *log_ctx = s->avctx;
        int ret;
        struct v4l2_format ofmt, cfmt;
    
        s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
        if (s->fd < 0)
            return AVERROR(errno);
    
        ret = v4l2_prepare_contexts(s, 0);
        if (ret < 0)
            goto error;
    
        ofmt = s->output.format;
        cfmt = s->capture.format;
        av_log(log_ctx, AV_LOG_INFO, "requesting formats: output=%s capture=%s\n",
                                     av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(ofmt.type) ?
                                                   ofmt.fmt.pix_mp.pixelformat :
                                                   ofmt.fmt.pix.pixelformat),
                                     av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(cfmt.type) ?
                                                   cfmt.fmt.pix_mp.pixelformat :
                                                   cfmt.fmt.pix.pixelformat));
    //ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
        ret = ff_v4l2_context_set_format(&s->output);
        if (ret) {
            av_log(log_ctx, AV_LOG_ERROR, "can't set v4l2 output format\n");
            goto error;
        }
    //ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
        ret = ff_v4l2_context_set_format(&s->capture);
        if (ret) {
            av_log(log_ctx, AV_LOG_ERROR, "can't to set v4l2 capture format\n");
            goto error;
        }
    //buffers are created and enqueued here
        ret = ff_v4l2_context_init(&s->output);
        if (ret) {
            av_log(log_ctx, AV_LOG_ERROR, "no v4l2 output context's buffers\n");
            goto error;
        }
    
        /* decoder's buffers need to be updated at a later stage */
        if (s->avctx && !av_codec_is_decoder(s->avctx->codec)) {
        //only an encoder initializes capture here; a decoder's capture buffers are set up later
            ret = ff_v4l2_context_init(&s->capture);
            if (ret) {
                av_log(log_ctx, AV_LOG_ERROR, "no v4l2 capture context's buffers\n");
                goto error;
            }
        }
    
        return 0;
    
    error:
        if (close(s->fd) < 0) {
            ret = AVERROR(errno);
            av_log(log_ctx, AV_LOG_ERROR, "error closing %s (%s)\n",
                s->devname, av_err2str(AVERROR(errno)));
        }
        s->fd = -1;
    
        return ret;
    }
    //to switch to V4L2_MEMORY_USERPTR, these are the two functions to change
    static int v4l2_prepare_contexts(V4L2m2mContext *s, int probe)
    {
        struct v4l2_capability cap;
        void *log_ctx = s->avctx;
        int ret;
    
        s->capture.done = s->output.done = 0;
        s->capture.name = "capture";
        s->output.name = "output";
        atomic_init(&s->refcount, 0);
        sem_init(&s->refsync, 0, 0);
    
        memset(&cap, 0, sizeof(cap));
        //query the device's capabilities
        ret = ioctl(s->fd, VIDIOC_QUERYCAP, &cap);
        if (ret < 0)
            return ret;
    
        av_log(log_ctx, probe ? AV_LOG_DEBUG : AV_LOG_INFO,
                         "driver '%s' on card '%s' in %s mode\n", cap.driver, cap.card,
                         v4l2_mplane_video(&cap) ? "mplane" :
                         v4l2_splane_video(&cap) ? "splane" : "unknown");
        if (v4l2_mplane_video(&cap)) {
            s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
            return 0;
        }

        if (v4l2_splane_video(&cap)) {
            s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            return 0;
        }
    //neither case above matches my device: its coded (output) side is single-plane while its
    //decoded (capture) side is multi-plane, so add a fallback for that mixed combination:
        if (!v4l2_splane_video(&cap) && !v4l2_mplane_video(&cap)) {
            s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            return 0;
        }
    
        return AVERROR(EINVAL);
    }
    
    int ff_v4l2_context_init(V4L2Context* ctx)
    {
        V4L2m2mContext *s = ctx_to_m2mctx(ctx);
        struct v4l2_requestbuffers req;
        int ret, i;
    
        if (!v4l2_type_supported(ctx)) {
            av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
            return AVERROR_PATCHWELCOME;
        }
        
        //fetch the current format
        ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);
    
        memset(&req, 0, sizeof(req));
        req.count = ctx->num_buffers;
        req.memory = V4L2_MEMORY_MMAP;
        req.type = ctx->type;
        //request this many buffers
        ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
        if (ret < 0) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno));
            return AVERROR(errno);
        }
    
        ctx->num_buffers = req.count;
        ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
        if (!ctx->buffers) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
            return AVERROR(ENOMEM);
        }
    
        for (i = 0; i < req.count; i++) {
            ctx->buffers[i].context = ctx;
            ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
            if (ret < 0) {
                av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
                goto error;
            }
        }
    
        av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
            V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
            req.count,
            v4l2_get_width(&ctx->format),
            v4l2_get_height(&ctx->format),
            V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
            V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);
    
        return 0;
    
    error:
        v4l2_release_buffers(ctx);
    
        av_freep(&ctx->buffers);
    
        return ret;
    }
    
    
    //initialize each buffer, using the count obtained above
    int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
    {
        V4L2Context *ctx = avbuf->context;
        int ret, i;
    
        avbuf->buf.memory = V4L2_MEMORY_MMAP;
        avbuf->buf.type = ctx->type;
        avbuf->buf.index = index;
    
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->buf.length = VIDEO_MAX_PLANES;
            avbuf->buf.m.planes = avbuf->planes;
        }
    //query the buffer's size (and plane offsets)
        ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
        if (ret < 0)
            return AVERROR(errno);
    
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->num_planes = 0;
            /* in MP, the V4L2 API states that buf.length means num_planes */
            for (i = 0; i < avbuf->buf.length; i++) {
                if (avbuf->buf.m.planes[i].length)
                    avbuf->num_planes++;
            }
        } else
            avbuf->num_planes = 1;
    
    //mmap each plane's address below
        for (i = 0; i < avbuf->num_planes; i++) {
    
            avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
                ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
                ctx->format.fmt.pix.bytesperline;
    
            if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
                avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
                avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                               PROT_READ | PROT_WRITE, MAP_SHARED,
                                               buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
            } else {
                avbuf->plane_info[i].length = avbuf->buf.length;
                avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                              PROT_READ | PROT_WRITE, MAP_SHARED,
                                              buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
            }
    
            if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
                return AVERROR(ENOMEM);
        }
    //mark the buffer as available
        avbuf->status = V4L2BUF_AVAILABLE;
    
        if (V4L2_TYPE_IS_OUTPUT(ctx->type))
            return 0;
    
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->buf.m.planes = avbuf->planes;
            avbuf->buf.length   = avbuf->num_planes;
    
        } else {
            avbuf->buf.bytesused = avbuf->planes[0].bytesused;
            avbuf->buf.length    = avbuf->planes[0].length;
        }
    // enqueue: ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf)
        return ff_v4l2_buffer_enqueue(avbuf);
    }
    

    v4l2_prepare_decoder() mainly subscribes to events via the VIDIOC_SUBSCRIBE_EVENT ioctl: one is V4L2_EVENT_SOURCE_CHANGE, the other is V4L2_EVENT_EOS.

    static int v4l2_prepare_decoder(V4L2m2mContext *s)
    {
        struct v4l2_event_subscription sub;
        V4L2Context *output = &s->output;
        int ret;
    
        /**
         * requirements
         */
        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_SOURCE_CHANGE;
        //subscribe to event: source change
        ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
        if ( ret < 0) {
            if (output->height == 0 || output->width == 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                    "the v4l2 driver does not support VIDIOC_SUBSCRIBE_EVENT\n"
                    "you must provide codec_height and codec_width on input\n");
                return ret;
            }
        }
    
        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_EOS;
        //subscribe to event: end of stream
        ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
        if (ret < 0)
            av_log(s->avctx, AV_LOG_WARNING,
                   "the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT\n");
    
        return 0;
    }
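
    For reference, here is roughly how such a subscription gets consumed later. This is a sketch against the standard V4L2 UAPI (not the FFmpeg code; v4l2_handle_event() is the real counterpart): when poll() reports POLLPRI, the pending event is read back with VIDIOC_DQEVENT:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* hypothetical helper: drain one pending event after POLLPRI;
     * returns 1 on source change, 2 on EOS, 0 otherwise, -1 on error */
    static int drain_v4l2_event(int fd)
    {
        struct v4l2_event evt;

        memset(&evt, 0, sizeof(evt));
        if (ioctl(fd, VIDIOC_DQEVENT, &evt) < 0)
            return -1;

        switch (evt.type) {
        case V4L2_EVENT_SOURCE_CHANGE:
            return 1; /* resolution changed: the capture queue must be reconfigured */
        case V4L2_EVENT_EOS:
            return 2; /* the driver will produce no more frames */
        default:
            return 0;
        }
    }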
    

    2. v4l2_receive_frame()

    
    static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
    {
        V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
        V4L2Context *const capture = &s->capture;
        V4L2Context *const output = &s->output;
        int ret;
    
        if (!s->buf_pkt.size) {
    //fetch a packet (run through the bsf, if one is attached)
            ret = ff_decode_get_packet(avctx, &s->buf_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                //no packet available: check whether a decoded frame (YUV) is ready; note this is the capture side
                    return ff_v4l2_context_dequeue_frame(capture, frame, 0);
                else if (ret != AVERROR_EOF)
                    return ret;
            }
        }
    
        if (s->draining)
            goto dequeue;
    //key point: feed the packet into the decoder for decoding; note this is the output side
        ret = ff_v4l2_context_enqueue_packet(output, &s->buf_pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN))
            goto fail;
    
        /* if EAGAIN don't unref packet and try to enqueue in the next iteration */
        if (ret != AVERROR(EAGAIN))
            av_packet_unref(&s->buf_pkt);
    
        if (!s->draining) {
        //important: for decoding, the capture buffers are (re)configured here and the capture stream is started
            ret = v4l2_try_start(avctx);
            if (ret) {
                /* cant recover */
                if (ret != AVERROR(ENOMEM))
                    ret = 0;
                goto fail;
            }
        }
    
    dequeue:
        return ff_v4l2_context_dequeue_frame(capture, frame, -1);
    fail:
        av_packet_unref(&s->buf_pkt);
        return ret;
    }
    
    //this is where decoded data is retrieved
    int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame, int timeout)
    {
        V4L2Buffer *avbuf;
    
        /*
         * timeout=-1 blocks until:
         *  1. decoded frame available
         *  2. an input buffer is ready to be dequeued
         */
        avbuf = v4l2_dequeue_v4l2buf(ctx, timeout);
        if (!avbuf) {
            if (ctx->done)
                return AVERROR_EOF;
    
            return AVERROR(EAGAIN);
        }
    //convert the v4l2 buffer into an AVFrame
        return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
    }
    
    

    ff_v4l2_context_enqueue_packet() is what feeds data into v4l2:

    int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
    {
        V4L2m2mContext *s = ctx_to_m2mctx(ctx);
        V4L2Buffer* avbuf;
        int ret;
    
        if (!pkt->size) {
            ret = v4l2_stop_decode(ctx);
            if (ret)
                av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
            s->draining = 1;
            return 0;
        }
    //grab a free buffer
        avbuf = v4l2_getfree_v4l2buf(ctx);
        if (!avbuf)
            return AVERROR(EAGAIN);
    //convert the AVPacket into a v4l2 buffer
        ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
        if (ret)
            return ret;
    //and enqueue it
        return ff_v4l2_buffer_enqueue(avbuf);
    }
    
    //find a free buffer
    static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
    {
        int timeout = 0; /* return when no more buffers to dequeue */
        int i;
    
        /* get back as many output buffers as possible */
        //if this is the to-driver port, first dequeue any spent buffers back from v4l2
        if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
              do {
              } while (v4l2_dequeue_v4l2buf(ctx, timeout));
        }
        
        
        for (i = 0; i < ctx->num_buffers; i++) {
            if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
                return &ctx->buffers[i];
        }
    
        return NULL;
    }
    
    
    
    //dequeue here: both spent buffers on the output port and decoded (YUV) buffers on the capture port
    static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
    {
        struct v4l2_plane planes[VIDEO_MAX_PLANES];
        struct v4l2_buffer buf = { 0 };
        V4L2Buffer *avbuf;
        //events to poll for
        struct pollfd pfd = {
            .events =  POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
            .fd = ctx_to_m2mctx(ctx)->fd,
        };
        int i, ret;
    
        //for a decoder the input/output roles are the reverse of a camera:
        //the output queue corresponds to the decoder's input, so checking "not output" here selects the capture side
        if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx->buffers) {
        //simple check: see whether any buffer is still inside the driver; if none is, all capture buffers are held in userspace and the warning below fires
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                    break;
            }
            if (i == ctx->num_buffers)
                av_log(logger(ctx), AV_LOG_WARNING, "All capture buffers returned to "
                                                    "userspace. Increase num_capture_buffers "
                                                    "to prevent device deadlock or dropped "
                                                    "packets/frames.\n");
        }
    
        /* if we are draining and there are no more capture buffers queued in the driver we are done */
        if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
            for (i = 0; i < ctx->num_buffers; i++) {
                /* capture buffer initialization happens during decode hence
                 * detection happens at runtime
                 */
                if (!ctx->buffers)
                    break;
    
                if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                    goto start;
            }
            ctx->done = 1;
            return NULL;
        }
    
    start:
        //output is really the coded-input side; listening for these two events is enough
        if (V4L2_TYPE_IS_OUTPUT(ctx->type))
            pfd.events =  POLLOUT | POLLWRNORM;
        else {
            /* no need to listen to requests for more input while draining */
            if (ctx_to_m2mctx(ctx)->draining)
                pfd.events =  POLLIN | POLLRDNORM | POLLPRI;
        }
    
        for (;;) {
            ret = poll(&pfd, 1, timeout);
            if (ret > 0)
                break;
            if (errno == EINTR)
                continue;
            return NULL;
        }
    
        /* 0. handle errors */
        if (pfd.revents & POLLERR) {
            /* if we are trying to get free buffers but none have been queued yet
               no need to raise a warning */
            if (timeout == 0) {
                for (i = 0; i < ctx->num_buffers; i++) {
                    if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                        av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
                }
            }
            else
                av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
    
            return NULL;
        }
    
        /* 1. handle resolution changes */
        if (pfd.revents & POLLPRI) {
            ret = v4l2_handle_event(ctx);
            if (ret < 0) {
                /* if re-init failed, abort */
                ctx->done = 1;
                return NULL;
            }
            if (ret) {
                /* if re-init was successful drop the buffer (if there was one)
                 * since we had to reconfigure capture (unmap all buffers)
                 */
                return NULL;
            }
        }
    
        /* 2. dequeue the buffer */
        if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {
            //decoded data may have arrived
            if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
                /* there is a capture buffer ready */
                if (pfd.revents & (POLLIN | POLLRDNORM))
                    goto dequeue;
    
                /* the driver is ready to accept more input; instead of waiting for the capture
                 * buffer to complete we return NULL so input can proceed (we are single threaded)
                 */
                if (pfd.revents & (POLLOUT | POLLWRNORM))
                    return NULL;
            }
    
    dequeue:
            memset(&buf, 0, sizeof(buf));
            buf.memory = V4L2_MEMORY_MMAP;
            buf.type = ctx->type;
            if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
                memset(planes, 0, sizeof(planes));
                buf.length = VIDEO_MAX_PLANES;
                buf.m.planes = planes;
            }
    
            ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
            if (ret) {
                if (errno != EAGAIN) {
                    ctx->done = 1;
                    if (errno != EPIPE)
                        av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                            ctx->name, av_err2str(AVERROR(errno)));
                }
                return NULL;
            }
    
            if (ctx_to_m2mctx(ctx)->draining && !V4L2_TYPE_IS_OUTPUT(ctx->type)) {
                int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
                                buf.m.planes[0].bytesused : buf.bytesused;
                if (bytesused == 0) {
                    ctx->done = 1;
                    return NULL;
                }
    #ifdef V4L2_BUF_FLAG_LAST
                if (buf.flags & V4L2_BUF_FLAG_LAST)
                    ctx->done = 1;
    #endif
            }
    
            avbuf = &ctx->buffers[buf.index];
            avbuf->status = V4L2BUF_AVAILABLE;
            avbuf->buf = buf;
            //multi-planar only: copy the dequeued plane array back into our V4L2Buffer
            if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
                memcpy(avbuf->planes, planes, sizeof(planes));
                avbuf->buf.m.planes = avbuf->planes;
            }
            return avbuf;
        }
    
        return NULL;
    }
    

    Above we saw both enqueue and dequeue for the output port's buffers (the port that is fed the coded bitstream), but for the capture port (decoded data) we only ever saw a dequeue, never an enqueue. That case is special: it happens in v4l2_free_buffer():

    static void v4l2_free_buffer(void *opaque, uint8_t *unused)
    {
        V4L2Buffer* avbuf = opaque;
        V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    
        if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
            atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);
    
            if (s->reinit) {
                if (!atomic_load(&s->refcount))
                    sem_post(&s->refsync);
            } else {
                if (s->draining && V4L2_TYPE_IS_OUTPUT(avbuf->context->type)) {
                    /* no need to queue more buffers to the driver */
                    avbuf->status = V4L2BUF_AVAILABLE;
                }
            else if (avbuf->context->streamon) //the buffer is automatically re-queued when its data is released
                    ff_v4l2_buffer_enqueue(avbuf);
            }
    
            av_buffer_unref(&avbuf->context_ref);
        }
    }
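
    A practical consequence for the application (a sketch reusing the hypothetical decode-loop setup from section 1, not FFmpeg code): every decoded frame you keep referenced pins one capture buffer in V4L2BUF_RET_USER, so it cannot be re-queued; hold more frames than num_capture_buffers at once and the dequeue path above starts warning that all capture buffers are in userspace.

    #include <libavcodec/avcodec.h>

    /* sketch: holding decoded frames pins capture buffers in V4L2BUF_RET_USER;
     * avctx is assumed opened with a v4l2m2m decoder as in the earlier sketch */
    static void hold_then_release(AVCodecContext *avctx)
    {
        AVFrame *frame = av_frame_alloc();
        AVFrame *held[8];
        int n = 0;

        while (n < 8 && avcodec_receive_frame(avctx, frame) >= 0) {
            held[n] = av_frame_alloc();
            av_frame_ref(held[n], frame); /* extra ref on frame->buf[] keeps the v4l2 buffer out of the driver */
            av_frame_unref(frame);
            n++;
        }

        /* unref'ing is what triggers v4l2_free_buffer() and re-queues each buffer */
        while (n > 0)
            av_frame_free(&held[--n]);

        av_frame_free(&frame);
    }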
    

    Next, two very important functions must be mentioned:

    ff_v4l2_buffer_buf_to_avframe()
    ff_v4l2_buffer_buf_to_avpkt()
    

    1. ff_v4l2_buffer_buf_to_avframe() involves reference counting: once a decoded frame has been handed to the application layer, the v4l2 plugin can only reuse the underlying buffer after the user unrefs it.

    int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
    {
        int ret;
    
        av_frame_unref(frame);
    
        /* 1. get references to the actual data */
        //the reference counting happens in here
        ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
        if (ret)
            return ret;
    
        /* 2. get frame information */
        frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
        frame->color_primaries = v4l2_get_color_primaries(avbuf);
        frame->colorspace = v4l2_get_color_space(avbuf);
        frame->color_range = v4l2_get_color_range(avbuf);
        frame->color_trc = v4l2_get_color_trc(avbuf);
        frame->pts = v4l2_get_pts(avbuf);
        frame->pkt_dts = AV_NOPTS_VALUE;
    
        /* these values are updated also during re-init in v4l2_process_driver_event */
        frame->height = avbuf->context->height;
        frame->width = avbuf->context->width;
        frame->sample_aspect_ratio = avbuf->context->sample_aspect_ratio;
    
        /* 3. report errors upstream */
        if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
            av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
            frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
        }
    
        return 0;
    }
    
    static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf)
    {
        int i, ret;
    
        frame->format = avbuf->context->av_pix_fmt;
    
        for (i = 0; i < avbuf->num_planes; i++) {
        //hand the v4l2 buffer's data over to the frame; note this is a shallow (zero-copy) transfer
        //for YUV data this runs three times, since num_planes is 3
            ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
            if (ret)
                return ret;
    
            frame->linesize[i] = avbuf->plane_info[i].bytesperline;
            frame->data[i] = frame->buf[i]->data;
        }
    
        /* fixup special cases */
        switch (avbuf->context->av_pix_fmt) {
        case AV_PIX_FMT_NV12:
        case AV_PIX_FMT_NV21:
            if (avbuf->num_planes > 1)
                break;
            frame->linesize[1] = avbuf->plane_info[0].bytesperline;
            frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
            break;
    
        case AV_PIX_FMT_YUV420P:
            if (avbuf->num_planes > 1)
                break;
            frame->linesize[1] = avbuf->plane_info[0].bytesperline >> 1;
            frame->linesize[2] = avbuf->plane_info[0].bytesperline >> 1;
            frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
            frame->data[2] = frame->data[1] + ((avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height) >> 2);
            break;
    
        default:
            break;
        }
    
        return 0;
    }
    
    
    static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
    {
        int ret;
    
        if (plane >= in->num_planes)
            return AVERROR(EINVAL);
    
        /* even though most encoders return 0 in data_offset encoding vp8 does require this value */
        *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                                in->plane_info[plane].length, v4l2_free_buffer, in, 0);
        if (!*buf)
            return AVERROR(ENOMEM);
    //this is where all the references and counts come together:
    //v4l2 is zero-copy, so the v4l2 buffer's data is passed straight to the user layer
        ret = v4l2_buf_increase_ref(in);
        if (ret)
            av_buffer_unref(buf);
    
        return ret;
    }
    
    //here is the key manifestation of the references among the structs introduced earlier
    static int v4l2_buf_increase_ref(V4L2Buffer *in)
    {
        V4L2m2mContext *s = buf_to_m2mctx(in);
    //each v4l2 buffer carries a context_ref
        if (in->context_ref)
            atomic_fetch_add(&in->context_refcount, 1);
        else {
        //the first time through we land here and take a reference on the V4L2m2mContext directly
        //i.e. while any v4l2 buffer's data is still out there, the V4L2m2mContext cannot be freed, since its refcount stays above 1
            in->context_ref = av_buffer_ref(s->self_ref);
            if (!in->context_ref)
                return AVERROR(ENOMEM);
    
            in->context_refcount = 1;
        }
    
        in->status = V4L2BUF_RET_USER;
        //the V4L2m2mContext's refcount also goes up by one
        atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);
    
        return 0;
    }
    
    static void v4l2_free_buffer(void *opaque, uint8_t *unused)
    {
        V4L2Buffer* avbuf = opaque;
        V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    //for a v4l2 buffer holding YUV data, context_refcount will be 3; when the last plane's
    //data is released, the fetch_sub below sees the count reach 1
        if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
            atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);
    
            if (s->reinit) {
                if (!atomic_load(&s->refcount))
                    sem_post(&s->refsync);
            } else {
                if (s->draining && V4L2_TYPE_IS_OUTPUT(avbuf->context->type)) {
                    /* no need to queue more buffers to the driver */
                    avbuf->status = V4L2BUF_AVAILABLE;
                }
            //this is also where the decoder's capture-port buffers get re-queued
                else if (avbuf->context->streamon)
                    ff_v4l2_buffer_enqueue(avbuf);
            }
    
            av_buffer_unref(&avbuf->context_ref);
        }
    }
    
    
    Original article: https://blog.csdn.net/weixin_43360707/article/details/127638819