• 深入理解FFmpeg--libavformat接口使用(一)


    libavformat(lavf)是一个用于处理各种媒体容器格式的库。它的两个主要用途是解复用(即将媒体文件拆分为组件流)及其反向过程——复用(将提供的数据以指定的容器格式写入)。它还有一个I/O模块,支持多种访问数据的协议(如文件、tcp、http等)。在FFmpeg 4.0之前的版本中,使用lavf之前需要调用av_register_all()来注册所有已编译的复用器、解复用器和协议(4.0及之后的版本已不再需要该调用)。除非您绝对确定不会使用libavformat的网络功能,否则还应该调用avformat_network_init();

    一、媒体流封装(Muxing)

    媒体流封装主要是指以AVPackets的形式获取编码后的数据后,以指定的容器格式将其写入到文件或者其他方式输出到字节流中。

    Muxing实际执行的主要API有:

    初始化:avformat_alloc_output_context2();

    创建媒体流(如果有的话):avformat_new_stream();

    写文件头:avformat_write_header();

    写数据包:av_write_frame()/av_interleaved_write_frame();

    写文件尾:av_write_trailer();

    流程图:

    代码示例:

    在官方源码:/doc/examples/muxing.c

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
    14. #define STREAM_DURATION 10.0
    15. #define STREAM_FRAME_RATE 25 /* 25 images/s */
    16. #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
    17. #define SCALE_FLAGS SWS_BICUBIC
    18. // a wrapper around a single output AVStream
    19. typedef struct OutputStream {
    20. AVStream *st;
    21. AVCodecContext *enc;
    22. /* pts of the next frame that will be generated */
    23. int64_t next_pts;
    24. int samples_count;
    25. AVFrame *frame;
    26. AVFrame *tmp_frame;
    27. AVPacket *tmp_pkt;
    28. float t, tincr, tincr2;
    29. struct SwsContext *sws_ctx;
    30. struct SwrContext *swr_ctx;
    31. } OutputStream;
    32. static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
    33. {
    34. AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
    35. printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
    36. av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
    37. av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
    38. av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
    39. pkt->stream_index);
    40. }
    41. static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
    42. AVStream *st, AVFrame *frame, AVPacket *pkt)
    43. {
    44. int ret;
    45. // send the frame to the encoder
    46. ret = avcodec_send_frame(c, frame);
    47. if (ret < 0) {
    48. fprintf(stderr, "Error sending a frame to the encoder: %s\n",
    49. av_err2str(ret));
    50. exit(1);
    51. }
    52. while (ret >= 0) {
    53. ret = avcodec_receive_packet(c, pkt);
    54. if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    55. break;
    56. else if (ret < 0) {
    57. fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
    58. exit(1);
    59. }
    60. /* rescale output packet timestamp values from codec to stream timebase */
    61. av_packet_rescale_ts(pkt, c->time_base, st->time_base);
    62. pkt->stream_index = st->index;
    63. /* Write the compressed frame to the media file. */
    64. log_packet(fmt_ctx, pkt);
    65. ret = av_interleaved_write_frame(fmt_ctx, pkt);
    66. /* pkt is now blank (av_interleaved_write_frame() takes ownership of
    67. * its contents and resets pkt), so that no unreferencing is necessary.
    68. * This would be different if one used av_write_frame(). */
    69. if (ret < 0) {
    70. fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
    71. exit(1);
    72. }
    73. }
    74. return ret == AVERROR_EOF ? 1 : 0;
    75. }
    76. /* Add an output stream. */
    77. static void add_stream(OutputStream *ost, AVFormatContext *oc,
    78. const AVCodec **codec,
    79. enum AVCodecID codec_id)
    80. {
    81. AVCodecContext *c;
    82. int i;
    83. /* find the encoder */
    84. *codec = avcodec_find_encoder(codec_id);
    85. if (!(*codec)) {
    86. fprintf(stderr, "Could not find encoder for '%s'\n",
    87. avcodec_get_name(codec_id));
    88. exit(1);
    89. }
    90. ost->tmp_pkt = av_packet_alloc();
    91. if (!ost->tmp_pkt) {
    92. fprintf(stderr, "Could not allocate AVPacket\n");
    93. exit(1);
    94. }
    95. ost->st = avformat_new_stream(oc, NULL);
    96. if (!ost->st) {
    97. fprintf(stderr, "Could not allocate stream\n");
    98. exit(1);
    99. }
    100. ost->st->id = oc->nb_streams-1;
    101. c = avcodec_alloc_context3(*codec);
    102. if (!c) {
    103. fprintf(stderr, "Could not alloc an encoding context\n");
    104. exit(1);
    105. }
    106. ost->enc = c;
    107. switch ((*codec)->type) {
    108. case AVMEDIA_TYPE_AUDIO:
    109. c->sample_fmt = (*codec)->sample_fmts ?
    110. (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
    111. c->bit_rate = 64000;
    112. c->sample_rate = 44100;
    113. if ((*codec)->supported_samplerates) {
    114. c->sample_rate = (*codec)->supported_samplerates[0];
    115. for (i = 0; (*codec)->supported_samplerates[i]; i++) {
    116. if ((*codec)->supported_samplerates[i] == 44100)
    117. c->sample_rate = 44100;
    118. }
    119. }
    120. c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
    121. c->channel_layout = AV_CH_LAYOUT_STEREO;
    122. if ((*codec)->channel_layouts) {
    123. c->channel_layout = (*codec)->channel_layouts[0];
    124. for (i = 0; (*codec)->channel_layouts[i]; i++) {
    125. if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
    126. c->channel_layout = AV_CH_LAYOUT_STEREO;
    127. }
    128. }
    129. c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
    130. ost->st->time_base = (AVRational){ 1, c->sample_rate };
    131. break;
    132. case AVMEDIA_TYPE_VIDEO:
    133. c->codec_id = codec_id;
    134. c->bit_rate = 400000;
    135. /* Resolution must be a multiple of two. */
    136. c->width = 352;
    137. c->height = 288;
    138. /* timebase: This is the fundamental unit of time (in seconds) in terms
    139. * of which frame timestamps are represented. For fixed-fps content,
    140. * timebase should be 1/framerate and timestamp increments should be
    141. * identical to 1. */
    142. ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
    143. c->time_base = ost->st->time_base;
    144. c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    145. c->pix_fmt = STREAM_PIX_FMT;
    146. if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
    147. /* just for testing, we also add B-frames */
    148. c->max_b_frames = 2;
    149. }
    150. if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
    151. /* Needed to avoid using macroblocks in which some coeffs overflow.
    152. * This does not happen with normal video, it just happens here as
    153. * the motion of the chroma plane does not match the luma plane. */
    154. c->mb_decision = 2;
    155. }
    156. break;
    157. default:
    158. break;
    159. }
    160. /* Some formats want stream headers to be separate. */
    161. if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    162. c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    163. }
    164. /**************************************************************/
    165. /* audio output */
    166. static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
    167. uint64_t channel_layout,
    168. int sample_rate, int nb_samples)
    169. {
    170. AVFrame *frame = av_frame_alloc();
    171. int ret;
    172. if (!frame) {
    173. fprintf(stderr, "Error allocating an audio frame\n");
    174. exit(1);
    175. }
    176. frame->format = sample_fmt;
    177. frame->channel_layout = channel_layout;
    178. frame->sample_rate = sample_rate;
    179. frame->nb_samples = nb_samples;
    180. if (nb_samples) {
    181. ret = av_frame_get_buffer(frame, 0);
    182. if (ret < 0) {
    183. fprintf(stderr, "Error allocating an audio buffer\n");
    184. exit(1);
    185. }
    186. }
    187. return frame;
    188. }
    189. static void open_audio(AVFormatContext *oc, const AVCodec *codec,
    190. OutputStream *ost, AVDictionary *opt_arg)
    191. {
    192. AVCodecContext *c;
    193. int nb_samples;
    194. int ret;
    195. AVDictionary *opt = NULL;
    196. c = ost->enc;
    197. /* open it */
    198. av_dict_copy(&opt, opt_arg, 0);
    199. ret = avcodec_open2(c, codec, &opt);
    200. av_dict_free(&opt);
    201. if (ret < 0) {
    202. fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
    203. exit(1);
    204. }
    205. /* init signal generator */
    206. ost->t = 0;
    207. ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    208. /* increment frequency by 110 Hz per second */
    209. ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
    210. if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
    211. nb_samples = 10000;
    212. else
    213. nb_samples = c->frame_size;
    214. ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
    215. c->sample_rate, nb_samples);
    216. ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
    217. c->sample_rate, nb_samples);
    218. /* copy the stream parameters to the muxer */
    219. ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    220. if (ret < 0) {
    221. fprintf(stderr, "Could not copy the stream parameters\n");
    222. exit(1);
    223. }
    224. /* create resampler context */
    225. ost->swr_ctx = swr_alloc();
    226. if (!ost->swr_ctx) {
    227. fprintf(stderr, "Could not allocate resampler context\n");
    228. exit(1);
    229. }
    230. /* set options */
    231. av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
    232. av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
    233. av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    234. av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
    235. av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
    236. av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
    237. /* initialize the resampling context */
    238. if ((ret = swr_init(ost->swr_ctx)) < 0) {
    239. fprintf(stderr, "Failed to initialize the resampling context\n");
    240. exit(1);
    241. }
    242. }
    243. /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
    244. * 'nb_channels' channels. */
    245. static AVFrame *get_audio_frame(OutputStream *ost)
    246. {
    247. AVFrame *frame = ost->tmp_frame;
    248. int j, i, v;
    249. int16_t *q = (int16_t*)frame->data[0];
    250. /* check if we want to generate more frames */
    251. if (av_compare_ts(ost->next_pts, ost->enc->time_base,
    252. STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
    253. return NULL;
    254. for (j = 0; j nb_samples; j++) {
    255. v = (int)(sin(ost->t) * 10000);
    256. for (i = 0; i < ost->enc->channels; i++)
    257. *q++ = v;
    258. ost->t += ost->tincr;
    259. ost->tincr += ost->tincr2;
    260. }
    261. frame->pts = ost->next_pts;
    262. ost->next_pts += frame->nb_samples;
    263. return frame;
    264. }
    265. /*
    266. * encode one audio frame and send it to the muxer
    267. * return 1 when encoding is finished, 0 otherwise
    268. */
    269. static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
    270. {
    271. AVCodecContext *c;
    272. AVFrame *frame;
    273. int ret;
    274. int dst_nb_samples;
    275. c = ost->enc;
    276. frame = get_audio_frame(ost);
    277. if (frame) {
    278. /* convert samples from native format to destination codec format, using the resampler */
    279. /* compute destination number of samples */
    280. dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
    281. c->sample_rate, c->sample_rate, AV_ROUND_UP);
    282. av_assert0(dst_nb_samples == frame->nb_samples);
    283. /* when we pass a frame to the encoder, it may keep a reference to it
    284. * internally;
    285. * make sure we do not overwrite it here
    286. */
    287. ret = av_frame_make_writable(ost->frame);
    288. if (ret < 0)
    289. exit(1);
    290. /* convert to destination format */
    291. ret = swr_convert(ost->swr_ctx,
    292. ost->frame->data, dst_nb_samples,
    293. (const uint8_t **)frame->data, frame->nb_samples);
    294. if (ret < 0) {
    295. fprintf(stderr, "Error while converting\n");
    296. exit(1);
    297. }
    298. frame = ost->frame;
    299. frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
    300. ost->samples_count += dst_nb_samples;
    301. }
    302. return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
    303. }
    304. /**************************************************************/
    305. /* video output */
    306. static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
    307. {
    308. AVFrame *picture;
    309. int ret;
    310. picture = av_frame_alloc();
    311. if (!picture)
    312. return NULL;
    313. picture->format = pix_fmt;
    314. picture->width = width;
    315. picture->height = height;
    316. /* allocate the buffers for the frame data */
    317. ret = av_frame_get_buffer(picture, 0);
    318. if (ret < 0) {
    319. fprintf(stderr, "Could not allocate frame data.\n");
    320. exit(1);
    321. }
    322. return picture;
    323. }
    324. static void open_video(AVFormatContext *oc, const AVCodec *codec,
    325. OutputStream *ost, AVDictionary *opt_arg)
    326. {
    327. int ret;
    328. AVCodecContext *c = ost->enc;
    329. AVDictionary *opt = NULL;
    330. av_dict_copy(&opt, opt_arg, 0);
    331. /* open the codec */
    332. ret = avcodec_open2(c, codec, &opt);
    333. av_dict_free(&opt);
    334. if (ret < 0) {
    335. fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
    336. exit(1);
    337. }
    338. /* allocate and init a re-usable frame */
    339. ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    340. if (!ost->frame) {
    341. fprintf(stderr, "Could not allocate video frame\n");
    342. exit(1);
    343. }
    344. /* If the output format is not YUV420P, then a temporary YUV420P
    345. * picture is needed too. It is then converted to the required
    346. * output format. */
    347. ost->tmp_frame = NULL;
    348. if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
    349. ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
    350. if (!ost->tmp_frame) {
    351. fprintf(stderr, "Could not allocate temporary picture\n");
    352. exit(1);
    353. }
    354. }
    355. /* copy the stream parameters to the muxer */
    356. ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    357. if (ret < 0) {
    358. fprintf(stderr, "Could not copy the stream parameters\n");
    359. exit(1);
    360. }
    361. }
    362. /* Prepare a dummy image. */
    363. static void fill_yuv_image(AVFrame *pict, int frame_index,
    364. int width, int height)
    365. {
    366. int x, y, i;
    367. i = frame_index;
    368. /* Y */
    369. for (y = 0; y < height; y++)
    370. for (x = 0; x < width; x++)
    371. pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
    372. /* Cb and Cr */
    373. for (y = 0; y < height / 2; y++) {
    374. for (x = 0; x < width / 2; x++) {
    375. pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
    376. pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
    377. }
    378. }
    379. }
    380. static AVFrame *get_video_frame(OutputStream *ost)
    381. {
    382. AVCodecContext *c = ost->enc;
    383. /* check if we want to generate more frames */
    384. if (av_compare_ts(ost->next_pts, c->time_base,
    385. STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
    386. return NULL;
    387. /* when we pass a frame to the encoder, it may keep a reference to it
    388. * internally; make sure we do not overwrite it here */
    389. if (av_frame_make_writable(ost->frame) < 0)
    390. exit(1);
    391. if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
    392. /* as we only generate a YUV420P picture, we must convert it
    393. * to the codec pixel format if needed */
    394. if (!ost->sws_ctx) {
    395. ost->sws_ctx = sws_getContext(c->width, c->height,
    396. AV_PIX_FMT_YUV420P,
    397. c->width, c->height,
    398. c->pix_fmt,
    399. SCALE_FLAGS, NULL, NULL, NULL);
    400. if (!ost->sws_ctx) {
    401. fprintf(stderr,
    402. "Could not initialize the conversion context\n");
    403. exit(1);
    404. }
    405. }
    406. fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
    407. sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
    408. ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
    409. ost->frame->linesize);
    410. } else {
    411. fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    412. }
    413. ost->frame->pts = ost->next_pts++;
    414. return ost->frame;
    415. }
    416. /*
    417. * encode one video frame and send it to the muxer
    418. * return 1 when encoding is finished, 0 otherwise
    419. */
    420. static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
    421. {
    422. return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
    423. }
    424. static void close_stream(AVFormatContext *oc, OutputStream *ost)
    425. {
    426. avcodec_free_context(&ost->enc);
    427. av_frame_free(&ost->frame);
    428. av_frame_free(&ost->tmp_frame);
    429. av_packet_free(&ost->tmp_pkt);
    430. sws_freeContext(ost->sws_ctx);
    431. swr_free(&ost->swr_ctx);
    432. }
    433. /**************************************************************/
    434. /* media file output */
    435. int main(int argc, char **argv)
    436. {
    437. OutputStream video_st = { 0 }, audio_st = { 0 };
    438. const AVOutputFormat *fmt;
    439. const char *filename;
    440. AVFormatContext *oc;
    441. const AVCodec *audio_codec, *video_codec;
    442. int ret;
    443. int have_video = 0, have_audio = 0;
    444. int encode_video = 0, encode_audio = 0;
    445. AVDictionary *opt = NULL;
    446. int i;
    447. if (argc < 2) {
    448. printf("usage: %s output_file\n"
    449. "API example program to output a media file with libavformat.\n"
    450. "This program generates a synthetic audio and video stream, encodes and\n"
    451. "muxes them into a file named output_file.\n"
    452. "The output format is automatically guessed according to the file extension.\n"
    453. "Raw images can also be output by using '%%d' in the filename.\n"
    454. "\n", argv[0]);
    455. return 1;
    456. }
    457. filename = argv[1];
    458. for (i = 2; i+1 < argc; i+=2) {
    459. if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
    460. av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    461. }
    462. /* allocate the output media context */
    463. avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    464. if (!oc) {
    465. printf("Could not deduce output format from file extension: using MPEG.\n");
    466. avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    467. }
    468. if (!oc)
    469. return 1;
    470. fmt = oc->oformat;
    471. /* Add the audio and video streams using the default format codecs
    472. * and initialize the codecs. */
    473. if (fmt->video_codec != AV_CODEC_ID_NONE) {
    474. add_stream(&video_st, oc, &video_codec, fmt->video_codec);
    475. have_video = 1;
    476. encode_video = 1;
    477. }
    478. if (fmt->audio_codec != AV_CODEC_ID_NONE) {
    479. add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
    480. have_audio = 1;
    481. encode_audio = 1;
    482. }
    483. /* Now that all the parameters are set, we can open the audio and
    484. * video codecs and allocate the necessary encode buffers. */
    485. if (have_video)
    486. open_video(oc, video_codec, &video_st, opt);
    487. if (have_audio)
    488. open_audio(oc, audio_codec, &audio_st, opt);
    489. av_dump_format(oc, 0, filename, 1);
    490. /* open the output file, if needed */
    491. if (!(fmt->flags & AVFMT_NOFILE)) {
    492. ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    493. if (ret < 0) {
    494. fprintf(stderr, "Could not open '%s': %s\n", filename,
    495. av_err2str(ret));
    496. return 1;
    497. }
    498. }
    499. /* Write the stream header, if any. */
    500. ret = avformat_write_header(oc, &opt);
    501. if (ret < 0) {
    502. fprintf(stderr, "Error occurred when opening output file: %s\n",
    503. av_err2str(ret));
    504. return 1;
    505. }
    506. while (encode_video || encode_audio) {
    507. /* select the stream to encode */
    508. if (encode_video &&
    509. (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
    510. audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
    511. encode_video = !write_video_frame(oc, &video_st);
    512. } else {
    513. encode_audio = !write_audio_frame(oc, &audio_st);
    514. }
    515. }
    516. /* Write the trailer, if any. The trailer must be written before you
    517. * close the CodecContexts open when you wrote the header; otherwise
    518. * av_write_trailer() may try to use memory that was freed on
    519. * av_codec_close(). */
    520. av_write_trailer(oc);
    521. /* Close each codec. */
    522. if (have_video)
    523. close_stream(oc, &video_st);
    524. if (have_audio)
    525. close_stream(oc, &audio_st);
    526. if (!(fmt->flags & AVFMT_NOFILE))
    527. /* Close the output file. */
    528. avio_closep(&oc->pb);
    529. /* free the stream */
    530. avformat_free_context(oc);
    531. return 0;
    532. }

  • 相关阅读:
    Linux 系统 Vi和Vim编辑器—笔记7
    java技术专家面试指南80问【java学习+面试宝典】(九)
    MySQL的SQL 优化:提升数据库性能
    树形DP小结
    PS的抠图算法原理剖析 1
    大数据之Stream流
    回溯算法题目
    Android versionCode会变成指定数值加001、002、003等后缀
    云服务器CVM_云主机_云计算服务器_弹性云服务器-腾讯云
    【leetcode】1137. 第 N 个泰波那契数
  • 原文地址:https://blog.csdn.net/Lemon_D1999/article/details/140315030