• FFmpeg 视频添加水印


    1.简介

    本示例,在视频中添加一个logo图片作为水印,并把添加好水印的视频帧保存到本地。

    2.流程

    2.1打开输入的文件

    首先打开输入的视频文件,查找到视频流索引,找到对应的视频解码器,拷贝一些重要的参数到解码器,最后打开解码器。

    1. //av_register_all();
    2. avformat_network_init();
    3. ///打开输入的流
    4. int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
    5. if (ret != 0)
    6. {
    7. printf("Couldn't open input stream.\n");
    8. return -1;
    9. }
    10. //查找流信息
    11. if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
    12. {
    13. printf("Couldn't find stream information.\n");
    14. return -1;
    15. }
    16. //找到视频流索引
    17. video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    18. AVStream* st = fmt_ctx->streams[video_index];
    19. AVCodec* codec = nullptr;
    20. //找到解码器
    21. codec = avcodec_find_decoder(st->codecpar->codec_id);
    22. if (!codec)
    23. {
    24. fprintf(stderr, "Codec not found\n");
    25. return -1;
    26. }
    27. //申请AVCodecContext
    28. dec_ctx = avcodec_alloc_context3(codec);
    29. if (!dec_ctx)
    30. {
    31. return -1;
    32. }
    33. avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_index]->codecpar);
    34. //打开解码器
    35. if ((ret = avcodec_open2(dec_ctx, codec, NULL)) < 0)
    36. {
    37. return -1;
    38. }
    39. return 0;

    2.2初始化滤镜

    2.2.1获取滤镜处理的源

    获得滤镜处理的源及滤镜处理的sink滤镜,同时申请输入和输出的滤镜结构AVFilterInOut。

    1. const AVFilter* buffersrc = avfilter_get_by_name("buffer");
    2. const AVFilter* buffersink = avfilter_get_by_name("buffersink");
    3. AVFilterInOut* outputs = avfilter_inout_alloc();
    4. AVFilterInOut* inputs = avfilter_inout_alloc();

    2.2.2处理AVFilterGraph

    需要的AVFilter和AVFilterInOut申请完成之后,需要申请一个AVFilterGraph,用来存储Filter的in和out描述信息

    1. AVFilterGraph* filter_graph = NULL;
    2. filter_graph = avfilter_graph_alloc();
    3. if (!outputs || !inputs || !filter_graph)
    4. {
    5. ret = AVERROR(ENOMEM);
    6. return ret;
    7. }

    2.2.3创建AVFilterContext

    接下来创建一个AVFilterContext结构用来存储Filter的处理内容,包括input与output的Filter信息,在创建input信息时,需要加入原视频的相关信息,比如pix_fmt、time_base等。

    首先输入参数:

    1. char args[512];
    2. snprintf(args, sizeof(args),
    3. "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
    4. dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
    5. time_base.num, time_base.den,
    6. dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    然后创建AVFilterContext:

    1. AVFilterContext* buffersink_ctx = NULL;
    2. AVFilterContext* buffersrc_ctx = NULL;
    3. ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
    4. args, NULL, filter_graph);
    5. if (ret < 0)
    6. {
    7. av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
    8. return ret;
    9. }
    10. /* buffer video sink: to terminate the filter chain. */
    11. ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
    12. NULL, NULL, filter_graph);
    13. if (ret < 0)
    14. {
    15. av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
    16. return ret;
    17. }

    2.2.4设置其他参数

    创建完输入与输出的AVFilterContext之后,如果还需要设置其他与Filter相关的参数,可以通过av_opt_set_int_list进行设置,例如设置输出的pix_fmt参数。

    1. ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
    2. AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    3. if (ret < 0)
    4. {
    5. av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
    6. return ret;
    7. }

    2.2.5建立滤镜解析器

    参数设置完毕后,可以针对前面设置的Filter相关的内容建立滤镜解析器。

    1. const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
    2. outputs->name = av_strdup("in");
    3. outputs->filter_ctx = buffersrc_ctx;
    4. outputs->pad_idx = 0;
    5. outputs->next = NULL;
    6. inputs->name = av_strdup("out");
    7. inputs->filter_ctx = buffersink_ctx;
    8. inputs->pad_idx = 0;
    9. inputs->next = NULL;
    10. if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
    11. &inputs, &outputs, NULL)) < 0)
    12. {
    13. return ret;
    14. }
    15. if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
    16. {
    17. return ret;
    18. }

    2.3 读取数据、解码并获取添加好水印的帧数据

    1. while (av_read_frame(fmt_ctx, pkt) >= 0)
    2. {
    3. if (pkt->stream_index == video_index)
    4. {
    5. int ret = avcodec_send_packet(dec_ctx, pkt);
    6. if (ret >= 0)
    7. {
    8. ret = avcodec_receive_frame(dec_ctx, frame);
    9. if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    10. {
    11. continue;
    12. }
    13. else if (ret < 0)
    14. {
    15. continue;
    16. }
    17. frame->pts = frame->best_effort_timestamp;
    18. /* push the decoded frame into the filtergraph */
    19. if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
    20. {
    21. av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
    22. break;
    23. }
    24. /* pull filtered frames from the filtergraph */
    25. while (1)
    26. {
    27. ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
    28. if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    29. break;
    30. if (ret < 0)
    31. break;
    32. switch (dec_ctx->pix_fmt)
    33. {
    34. case AV_PIX_FMT_YUV420P:
    35. {
    36. int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);
    37. char fileName[20] = { 0 };
    38. sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);
    39. FILE* fp;
    40. fp = fopen(fileName, "wb");
    41. for (int i = 0; i < filt_frame->height; i++)
    42. {
    43. fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
    44. }
    45. for (int i = 0; i < filt_frame->height / 2; i++)
    46. {
    47. fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
    48. }
    49. for (int i = 0; i < filt_frame->height / 2; i++)
    50. {
    51. fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
    52. }
    53. fclose(fp);
    54. }
    55. break;
    56. default:
    57. return -1;
    58. }
    59. av_frame_unref(filt_frame);
    60. }
    61. av_frame_unref(frame);
    62. }
    63. }
    64. }

    3.效果

    logo图如下

     

    效果图如下,可以看见添加到了左上角。

     4.源码

    1. #include "pch.h"
    2. #include
    3. #include
    4. extern "C"
    5. {
    6. #include "libavformat/avformat.h"
    7. #include "libavutil/dict.h"
    8. #include "libavutil/opt.h"
    9. #include "libavutil/timestamp.h"
    10. #include "libavutil/avutil.h"
    11. #include "libswscale/swscale.h"
    12. #include "libswresample/swresample.h"
    13. #include "libavutil/imgutils.h"
    14. #include "libavfilter/avfilter.h"
    15. #include "libavfilter/buffersink.h"
    16. #include "libavfilter/buffersrc.h"
    17. };
    18. static AVFormatContext* fmt_ctx = NULL;
    19. static AVCodecContext* dec_ctx = NULL;
    20. AVFilterContext* buffersink_ctx = NULL;
    21. AVFilterContext* buffersrc_ctx = NULL;
    22. AVFilterGraph* filter_graph = NULL;
    23. int video_index = -1;
    24. const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
    25. static int64_t last_pts = AV_NOPTS_VALUE;
    26. static int open_input_file(const char* filename)
    27. {
    28. //av_register_all();
    29. avformat_network_init();
    30. ///打开输入的流
    31. int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
    32. if (ret != 0)
    33. {
    34. printf("Couldn't open input stream.\n");
    35. return -1;
    36. }
    37. //查找流信息
    38. if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
    39. {
    40. printf("Couldn't find stream information.\n");
    41. return -1;
    42. }
    43. //找到视频流索引
    44. video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    45. AVStream* st = fmt_ctx->streams[video_index];
    46. AVCodec* codec = nullptr;
    47. //找到解码器
    48. codec = avcodec_find_decoder(st->codecpar->codec_id);
    49. if (!codec)
    50. {
    51. fprintf(stderr, "Codec not found\n");
    52. return -1;
    53. }
    54. //申请AVCodecContext
    55. dec_ctx = avcodec_alloc_context3(codec);
    56. if (!dec_ctx)
    57. {
    58. return -1;
    59. }
    60. avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_index]->codecpar);
    61. //打开解码器
    62. if ((ret = avcodec_open2(dec_ctx, codec, NULL) < 0))
    63. {
    64. return -1;
    65. }
    66. return 0;
    67. }
    68. static int init_filters(const char* filters_descr)
    69. {
    70. char args[512];
    71. int ret = 0;
    72. const AVFilter* buffersrc = avfilter_get_by_name("buffer");
    73. const AVFilter* buffersink = avfilter_get_by_name("buffersink");
    74. AVFilterInOut* outputs = avfilter_inout_alloc();
    75. AVFilterInOut* inputs = avfilter_inout_alloc();
    76. AVRational time_base = fmt_ctx->streams[video_index]->time_base;
    77. enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
    78. filter_graph = avfilter_graph_alloc();
    79. if (!outputs || !inputs || !filter_graph)
    80. {
    81. ret = AVERROR(ENOMEM);
    82. return ret;
    83. }
    84. /* buffer video source: the decoded frames from the decoder will be inserted here. */
    85. snprintf(args, sizeof(args),
    86. "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
    87. dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
    88. time_base.num, time_base.den,
    89. dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    90. ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
    91. args, NULL, filter_graph);
    92. if (ret < 0)
    93. {
    94. av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
    95. return ret;
    96. }
    97. /* buffer video sink: to terminate the filter chain. */
    98. ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
    99. NULL, NULL, filter_graph);
    100. if (ret < 0)
    101. {
    102. av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
    103. return ret;
    104. }
    105. ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
    106. AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    107. if (ret < 0)
    108. {
    109. av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
    110. return ret;
    111. }
    112. outputs->name = av_strdup("in");
    113. outputs->filter_ctx = buffersrc_ctx;
    114. outputs->pad_idx = 0;
    115. outputs->next = NULL;
    116. inputs->name = av_strdup("out");
    117. inputs->filter_ctx = buffersink_ctx;
    118. inputs->pad_idx = 0;
    119. inputs->next = NULL;
    120. if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
    121. &inputs, &outputs, NULL)) < 0)
    122. {
    123. return ret;
    124. }
    125. if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
    126. {
    127. return ret;
    128. }
    129. avfilter_inout_free(&inputs);
    130. avfilter_inout_free(&outputs);
    131. return ret;
    132. }
    133. int main()
    134. {
    135. ///1.打开文件
    136. const char* inputUrl = "test.mp4";
    137. int ret = -1;
    138. if ((ret = open_input_file(inputUrl) < 0))
    139. {
    140. return -1;
    141. }
    142. ///2.初始化滤镜
    143. if ((ret = init_filters(filter_descr)) < 0)
    144. {
    145. return -1;
    146. }
    147. AVPacket* pkt = av_packet_alloc();
    148. //av_init_packet(pkt);
    149. AVFrame* frame = av_frame_alloc();
    150. AVFrame *filt_frame = av_frame_alloc();
    151. while (av_read_frame(fmt_ctx, pkt) >= 0)
    152. {
    153. if (pkt->stream_index == video_index)
    154. {
    155. int ret = avcodec_send_packet(dec_ctx, pkt);
    156. if (ret >= 0)
    157. {
    158. ret = avcodec_receive_frame(dec_ctx, frame);
    159. if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    160. {
    161. continue;
    162. }
    163. else if (ret < 0)
    164. {
    165. continue;
    166. }
    167. frame->pts = frame->best_effort_timestamp;
    168. /* push the decoded frame into the filtergraph */
    169. if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
    170. {
    171. av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
    172. break;
    173. }
    174. /* pull filtered frames from the filtergraph */
    175. while (1)
    176. {
    177. ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
    178. if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    179. break;
    180. if (ret < 0)
    181. break;
    182. switch (dec_ctx->pix_fmt)
    183. {
    184. case AV_PIX_FMT_YUV420P:
    185. {
    186. int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);
    187. char fileName[20] = { 0 };
    188. sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);
    189. FILE* fp;
    190. fp = fopen(fileName, "wb");
    191. for (int i = 0; i < filt_frame->height; i++)
    192. {
    193. fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
    194. }
    195. for (int i = 0; i < filt_frame->height / 2; i++)
    196. {
    197. fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
    198. }
    199. for (int i = 0; i < filt_frame->height / 2; i++)
    200. {
    201. fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
    202. }
    203. fclose(fp);
    204. }
    205. break;
    206. default:
    207. return -1;
    208. }
    209. av_frame_unref(filt_frame);
    210. }
    211. av_frame_unref(frame);
    212. }
    213. }
    214. }
    215. avfilter_graph_free(&filter_graph);
    216. avcodec_close(dec_ctx);
    217. avcodec_free_context(&dec_ctx);
    218. avformat_close_input(&fmt_ctx);
    219. av_frame_free(&frame);
    220. av_frame_free(&filt_frame);
    221. av_packet_free(&pkt);
    222. return 0;
    223. }

  • 相关阅读:
    JAVASE总结作业----网络编程
    python的多线程介绍之thread
    集火全屋智能“后装市场”,真正玩得转的没几个
    1、2快速生成
    Superset embed Dashboard到React App
    交换机和路由器技术-31-扩展ACL
    vue中axios的封装
    sdk文档书写,如何写具有可读性和可信度的文档
    人力资源APP功能
    【PCL自学:Segmentation3】基于PCL的点云分割:区域增长分割
  • 原文地址:https://blog.csdn.net/wzz953200463/article/details/125983491