• FFmpeg与其他库的交互


    转载:https://www.cnblogs.com/lidabo/p/15826544.html

    视频软件不是仅靠一个FFmpeg就能完成。FFmpeg只包括流的读写及编码解码,其他操作都得靠其他库来实现。比如播放视频需要界面库;播放声音需要播放声音的库或系统API等。所以这一块也是重中之重。下面我介绍几种常用FFmpeg与其他库交互方式。

    将avframe与qt互转

    AVFrame转QImage比较简单,不过这儿有一个坑在于,直接指定数据的方式上面,一旦数据失效那么图片也会失效。此处的比较友好的做法是再构造一次

    1. // AVFrame 转 QImage
    2. // 首先确保图像帧格式为 AV_PIX_FMT_RGB32,如果不是,那么转一次
    3. AVFrame *_frame_rgb32 = _video_format_convert (_frame_xxx, AV_PIX_FMT_RGB32);
    4. // 此处 _img_tmp 即可用,与 _frame_rgb32 共用数据区域
    5. QImage _img_tmp (_frame_rgb32->data [0], _frame_rgb32->width, _frame_rgb32->height, QImage::Format_RGB32);
    6. //但如果想让 _frame_rgb32 释放后继续使用,那么需要再构造一次
    7. QImage _img { 640, 480, QImage::Format_ARGB32 };
    8. QPainter _p (&_img);
    9. _p.drawImage (_img.rect (), _img_tmp, _img_tmp.rect ());
    10. // QImage 转 AVFrame
    11. AVFrame *_frame_rgb32 = av_frame_alloc ();
    12. _frame_rgb32->width = _img.width ();
    13. _frame_rgb32->height = _img.height ();
    14. _frame_rgb32->format = AV_PIX_FMT_RGB32;
    15. av_frame_get_buffer (_frame_rgb32, 0);
    16. memcpy (_frame_rgb32->data [0], _img.bits (), _frame_rgb32->width * _frame_rgb32->height * 4);

    将avframe与gdiplus互转

    AVFrame转Gdiplus::Bitmap也是比较容易的,不过还是需要注意GDI+初始化不要忘了

    1. // AVFrame 转 Gdiplus::Bitmap
    2. // 此时 _frame_rgb32 为有效 AVFrame
    3. Gdiplus::Bitmap _bmp (_frame_rgb32->width, _frame_rgb32->height, PixelFormat32bppARGB);
    4. Gdiplus::BitmapData _data;
    5. _bmp.LockBits (&Gdiplus::Rect (0, 0, _bmp.GetWidth (), _bmp.GetHeight ()), Gdiplus::ImageLockModeWrite, PixelFormat32bppARGB, &_data);
    6. memcpy (_data.Scan0, _frame_rgb32->data [0], _frame_rgb32->width * _frame_rgb32->height * 4);
    7. _bmp.UnlockBits (&_data);
    8. // Gdiplus::Bitmap 转 AVFrame
    9. // 此时 _bmp 为有效 Gdiplus::Bitmap
    10. AVFrame *_frame_rgb32 = av_frame_alloc ();
    11. _frame_rgb32->width = _bmp.GetWidth ();
    12. _frame_rgb32->height = _bmp.GetHeight ();
    13. _frame_rgb32->format = AV_PIX_FMT_RGB32;
    14. av_frame_get_buffer (_frame_rgb32, 0);
    15. Gdiplus::BitmapData _data;
    16. _bmp.LockBits (&Gdiplus::Rect (0, 0, _bmp.GetWidth (), _bmp.GetHeight ()), Gdiplus::ImageLockModeRead, PixelFormat32bppARGB, &_data);
    17. memcpy (_frame_rgb32->data [0], _data.Scan0, _frame_rgb32->width * _frame_rgb32->height * 4);
    18. _bmp.UnlockBits (&_data);

    使用sdl1播放视频

    1. // 首先是SDL初始化代码
    2. SDL_Init (SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
    3. // 然后是创建SDL1窗口及SDL1图片
    4. SDL_Surface *_screen = SDL_SetVideoMode (_frame_yuv420p->width, _frame_yuv420p->height, 0, SDL_SWSURFACE);
    5. SDL_Overlay *_bmp = SDL_CreateYUVOverlay (_frame_yuv420p->width, _frame_yuv420p->height, SDL_YV12_OVERLAY, _screen);
    6. SDL_Rect _rect { 0, 0, _frame_yuv420p->width, _frame_yuv420p->height };
    7. // 然后是循环播放内部,将AVFrame结构体数据移动到SDL图片上
    8. SDL_LockYUVOverlay (_bmp);
    9. memcpy (_bmp->pixels [0], _frame_yuv420p->data [0], _frame_yuv420p->width * _frame_yuv420p->height);
    10. memcpy (_bmp->pixels [1], _frame_yuv420p->data [1], _frame_yuv420p->width * _frame_yuv420p->height / 4);
    11. memcpy (_bmp->pixels [2], _frame_yuv420p->data [2], _frame_yuv420p->width * _frame_yuv420p->height / 4);
    12. _frame_yuv420p->linesize [0] = _bmp->pitches [0];
    13. _frame_yuv420p->linesize [1] = _bmp->pitches [2];
    14. _frame_yuv420p->linesize [2] = _bmp->pitches [1];
    15. SDL_UnlockYUVOverlay (_bmp);
    16. SDL_DisplayYUVOverlay (_bmp, &_rect);
    17. // 暂停
    18. SDL_Delay (50);
    19. // 展示完毕,退出
    20. SDL_Quit ();

    使用sdl2播放视频

    1. // 首先是SDL初始化代码
    2. SDL_Init (SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
    3. // 然后是创建SDL2窗口及SDL2图片
    4. SDL_Window *_screen = SDL_CreateWindow ("My Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 640, 480, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
    5. SDL_Renderer* _sdlRenderer = SDL_CreateRenderer (_screen, -1, 0);
    6. SDL_Texture* _sdlTexture = SDL_CreateTexture (_sdlRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, 640, 480);
    7. SDL_Rect _rect { 0, 0, 640, 480 };
    8. // 然后是循环播放内部,将AVFrame结构体数据移动到SDL2图片上
    9. int _sz = _frame_yuv420p->width * _frame_yuv420p->height;
    10. uint8_t *_buf = new uint8_t [_sz * 3 / 2];
    11. memcpy (_buf, _frame_yuv420p->data [0], _sz);
    12. memcpy (_buf + _sz * 5 / 4, _frame_yuv420p->data [1], _sz / 4);
    13. memcpy (_buf + _sz, _frame_yuv420p->data [2], _sz / 4);
    14. SDL_UpdateTexture (_sdlTexture, NULL, _buf, 640);
    15. SDL_RenderClear (_sdlRenderer);
    16. SDL_RenderCopy (_sdlRenderer, _sdlTexture, NULL, &_rect);
    17. SDL_RenderPresent (_sdlRenderer);
    18. delete [] _buf;
    19. // 暂停
    20. SDL_Delay (50);
    21. // 展示完毕,退出
    22. SDL_Quit ();

    使用sdl2播放音频

    SDL2播放音频有一个问题在于,仅支持单例。也就是不能创建两个对象一起播放。

    1. // 全局变量,用于控制播音进度,通常实现写在类里面作为类成员变量
    2. volatile Uint8 *_audio_pos = nullptr;
    3. volatile Uint32 _audio_len = 0;
    4. // 回调函数。实际播音通过这个函数来实现
    5. void _fill_audio (void *udata, Uint8 *stream, int len) {
    6. // 如果控制进度写到类/结构体成员,那么udata就传递指针
    7. SDL_memset (stream, 0, len);
    8. if (_audio_len == 0)
    9. return;
    10. len = (len > (int) _audio_len ? _audio_len : len);
    11. //SDL_MixAudio (stream, (const Uint8*) _audio_pos, len, SDL_MIX_MAXVOLUME);
    12. SDL_MixAudioFormat (stream, (const Uint8*) _audio_pos, AUDIO_S16, len, SDL_MIX_MAXVOLUME);
    13. _audio_pos += len;
    14. _audio_len -= len;
    15. }
    16. // 初始化
    17. SDL_AudioSpec _spec;
    18. _spec.freq = _frame_s16->sample_rate;
    19. _spec.format = AUDIO_S16SYS;
    20. _spec.channels = _frame_s16->channels;
    21. _spec.silence = 0;
    22. _spec.samples = 1024;
    23. _spec.callback = _fill_audio; // 若回调写成类静态成员则改为 &AudioPlay2::_fill_audio
    24. _spec.userdata = nullptr; // 或者 this,根据需求决定是否传递
    25. if (int _ret = SDL_OpenAudio (&_spec, NULL)) {
    26. printf ("SDL_OpenAudio failed\n");
    27. return;
    28. }
    29. SDL_PauseAudio (0);
    30. // 开始播放过程,每读一帧就调用一遍
    31. _audio_pos = _frame_s16->data [0];
    32. _audio_len = _frame_s16->nb_samples * _frame_s16->channels * 2;
    33. while (_audio_len > 0)
    34. SDL_Delay (1);
    35. // 释放
    36. SDL_CloseAudio ();

    使用wave函数播放音频

    wave系列函数仅支持Windows平台,范围从WinXP到Win10,比较古老,但相对于SDL2来说可以少引用一个库。另外这两者的实现特别像,几乎可以不用修改架构实现与SDL2的互相迁移。

    1. // 全局变量,用于控制播放进度
    2. volatile LPWAVEHDR _pwh = nullptr;
    3. // 回调函数具有Win32特色,需要指定调用协定
    4. void CALLBACK _wave_out_proc (HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2) {
    5. // 此处 dwInstance 为用户指定的指针
    6. if (WOM_DONE == uMsg) {
    7. while (_pwh) {
    8. std::this_thread::sleep_for (std::chrono::milliseconds (1));
    9. }
    10. _pwh = (LPWAVEHDR) dwParam1;
    11. }
    12. }
    13. // 初始化
    14. WAVEFORMATEX _wfex;
    15. _wfex.wFormatTag = WAVE_FORMAT_PCM;
    16. _wfex.nChannels = (WORD) _frame_s16->channels;
    17. _wfex.nSamplesPerSec = (DWORD) _frame_s16->sample_rate;
    18. _wfex.wBitsPerSample = 16;
    19. _wfex.cbSize = sizeof (_wfex);
    20. _wfex.nBlockAlign = _wfex.wBitsPerSample * _wfex.nChannels / 8;
    21. _wfex.nAvgBytesPerSec = _wfex.nSamplesPerSec * _wfex.nBlockAlign;
    22. if (::waveOutOpen (nullptr, 0, &_wfex, 0, 0, WAVE_FORMAT_QUERY) != 0) {
    23. printf ("waveOutOpen failed\n");
    24. return;
    25. }
    26. if (::waveOutOpen (&_hwo, WAVE_MAPPER, &_wfex, (DWORD_PTR) _wave_out_proc, (DWORD_PTR) this, CALLBACK_FUNCTION) != 0) {
    27. printf ("waveOutOpen failed\n");
    28. return;
    29. }
    30. // 开始播放过程,每读一帧就调用一遍
    31. LPWAVEHDR _pwh = new WAVEHDR;
    32. if (!_pwh)
    33. return;
    34. memset (_pwh, 0, sizeof (WAVEHDR));
    35. _pwh->dwLoops = 1;
    36. _pwh->dwBufferLength = (DWORD) _frame_s16->nb_samples * _frame_s16->channels * 2;
    37. _pwh->lpData = new char [_pwh->dwBufferLength];
    38. if (!_pwh->lpData) {
    39. delete _pwh;
    40. return;
    41. }
    42. memcpy (_pwh->lpData, _frame_s16->data[0], _pwh->dwBufferLength);
    43. if (::waveOutPrepareHeader (_hwo, _pwh, sizeof (WAVEHDR)) != 0) {
    44. delete [] _pwh->lpData;
    45. delete _pwh;
    46. return;
    47. }
    48. if (::waveOutWrite (_hwo, _pwh, sizeof (WAVEHDR)) != 0) {
    49. delete [] _pwh->lpData;
    50. delete _pwh;
    51. return;
    52. }
    53. // 释放
    54. if (_hwo) {
    55. ::waveOutReset (_hwo);
    56. ::waveOutClose (_hwo);
    57. _hwo = NULL;
    58. }

    枚举dshow设备

    这个用到了dshow函数,操作稍微有点麻烦,获取一个设备需要一大堆代码。建议不要深究,需要的时候直接复制就好啦。

    此处不用FFmpeg的原因是,截至目前FFmpeg暂不支持以编程的方式获取列表,如果通过读控制台的方式还涉及到解析等,说白了还是FFmpeg的锅。

    地址:DirectShow – FFmpeg

    1. // dshow头文件
    2. #include <string>
    3. #include <vector>
    4. #include <dshow.h>
    5. #include <dvdmedia.h>
    6. #pragma comment (lib, "Strmiids.lib")
    7. #pragma comment (lib, "Winmm.lib")
    8. // 注意调用枚举前需要调用COM初始化
    9. ::CoInitializeEx (NULL, COINIT_APARTMENTTHREADED);
    10. // 枚举所有dshow视频设备
    11. std::vector<std::wstring> _video_names;
    12. ICreateDevEnum* pSysDevEnum = nullptr;
    13. if (SUCCEEDED (CoCreateInstance (CLSID_SystemDeviceEnum, nullptr, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**) &pSysDevEnum))) {
    14. IEnumMoniker* pEnumCat = nullptr;
    15. if (SUCCEEDED (pSysDevEnum->CreateClassEnumerator (CLSID_VideoInputDeviceCategory, &pEnumCat, 0))) {
    16. IMoniker* pMoniker = nullptr;
    17. ULONG cFetched = 0;
    18. while (SUCCEEDED (pEnumCat->Next (1, &pMoniker, &cFetched)) && cFetched) {
    19. IPropertyBag* pPropBag = nullptr;
    20. if (SUCCEEDED (pMoniker->BindToStorage (nullptr, nullptr, IID_IPropertyBag, (void**) &pPropBag))) {
    21. VARIANT varName;
    22. VariantInit (&varName);
    23. if (SUCCEEDED (pPropBag->Read (L"FriendlyName", &varName, 0))) {
    24. _video_names.push_back (varName.bstrVal);
    25. }
    26. VariantClear (&varName);
    27. if (pPropBag) {
    28. pPropBag->Release (); pPropBag = nullptr; // 置空,避免下方再次 Release 造成引用计数错误
    29. }
    29. }
    30. if (pPropBag)
    31. pPropBag->Release ();
    32. }
    33. if (pMoniker)
    34. pMoniker->Release ();
    35. }
    36. if (pEnumCat)
    37. pEnumCat->Release ();
    38. }
    39. if (pSysDevEnum)
    40. pSysDevEnum->Release ();
    41. // 枚举所有dshow音频设备
    42. std::vector<std::wstring> _audio_names;
    43. ICreateDevEnum* pSysDevEnum = NULL;
    44. if (SUCCEEDED (CoCreateInstance (CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**) &pSysDevEnum))) {
    45. IEnumMoniker* pEnumCat = nullptr;
    46. if (SUCCEEDED (pSysDevEnum->CreateClassEnumerator (CLSID_AudioInputDeviceCategory, &pEnumCat, 0))) {
    47. IMoniker* pMoniker = nullptr;
    48. ULONG cFetched;
    49. while (SUCCEEDED (pEnumCat->Next (1, &pMoniker, &cFetched)) && cFetched) {
    50. IPropertyBag* pPropBag = nullptr;
    51. if (SUCCEEDED (pMoniker->BindToStorage (nullptr, nullptr, IID_IPropertyBag, (void**) &pPropBag))) {
    52. VARIANT varName;
    53. VariantInit (&varName);
    54. if (SUCCEEDED (pPropBag->Read (L"FriendlyName", &varName, 0)))
    55. _audio_names.push_back (varName.bstrVal);
    56. VariantClear (&varName);
    57. }
    58. if (pPropBag)
    59. pPropBag->Release ();
    60. pMoniker->Release ();
    61. pMoniker = nullptr;
    62. }
    63. if (pMoniker)
    64. pMoniker->Release ();
    65. }
    66. if (pEnumCat)
    67. pEnumCat->Release ();
    68. }
    69. if (pSysDevEnum)
    70. pSysDevEnum->Release ();

    使用windows捕获扬声器输出

    一个通过调用COM组件实现的例子,代码比较多,不建议实际去研究,用着没问题就行了。有一个问题需要注意下:Win7 64位系统上不支持设置通道数,一旦设置后很容易捕获失败,所以后面需要自己手动转一次。代码中已经将音频数据转为了FFmpeg可用的AVFrame,可以直接用于处理或转码。

    1. HRESULT _r = 0;
    2. DWORD _nTaskIndex = 0;
    3. REFERENCE_TIME _hnsDefaultDevicePeriod = 0;
    4. LARGE_INTEGER _liFirstFire { 0 };
    5. //
    6. HANDLE _hEventStarted = ::CreateEvent (NULL, TRUE, FALSE, NULL);
    7. HANDLE _hEventStop = ::CreateEvent (NULL, TRUE, FALSE, NULL);
    8. HANDLE _hTimerWakeUp = ::CreateWaitableTimer (NULL, FALSE, NULL);
    9. HANDLE _hTask = AvSetMmThreadCharacteristics (_T ("Capture"), &_nTaskIndex);
    10. SetEvent (_hEventStarted);
    11. AVFrame *_frame = av_frame_alloc ();
    12. //
    13. IMMDeviceEnumerator *_pEnumerator = nullptr;
    14. IMMDevice *_pDevice = nullptr;
    15. IAudioClient *_pAudioClient = nullptr;
    16. WAVEFORMATEX *_pwfx = nullptr;
    17. IAudioCaptureClient *_pCaptureClient = nullptr;
    18. do {
    19. if (FAILED (_r = CoCreateInstance (__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**) &_pEnumerator))) {
    20. LOG_INFO ("CoCreateInstance failed %d", _r);
    21. break;
    22. }
    23. if (FAILED (_r = _pEnumerator->GetDefaultAudioEndpoint (eRender, eConsole, &_pDevice))) {
    24. LOG_INFO ("_pEnumerator->GetDefaultAudioEndpoint failed %d", _r);
    25. break;
    26. }
    27. if (FAILED (_r = _pDevice->Activate (__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**) &_pAudioClient))) {
    28. LOG_INFO ("_pDevice->Activate failed %d", _r);
    29. break;
    30. }
    31. if (FAILED (_r = _pAudioClient->GetDevicePeriod (&_hnsDefaultDevicePeriod, NULL))) {
    32. LOG_INFO ("_pAudioClient->GetDevicePeriod failed %d", _r);
    33. break;
    34. }
    35. if (FAILED (_r = _pAudioClient->GetMixFormat (&_pwfx))) {
    36. LOG_INFO ("_pAudioClient->GetMixFormat failed %d", _r);
    37. break;
    38. }
    39. //
    40. _pwfx->wBitsPerSample = 16;
    41. _pwfx->nSamplesPerSec = _sample_rate;
    42. //_pwfx->nChannels = _channel_num;
    43. _pwfx->nBlockAlign = _pwfx->nChannels * _pwfx->wBitsPerSample / 8;
    44. _pwfx->nAvgBytesPerSec = _pwfx->nBlockAlign * _pwfx->nSamplesPerSec;
    45. if (_pwfx->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
    46. _pwfx->wFormatTag = WAVE_FORMAT_PCM;
    47. } else if (_pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
    48. PWAVEFORMATEXTENSIBLE _pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(_pwfx);
    49. if (IsEqualGUID (KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, _pEx->SubFormat)) {
    50. _pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    51. _pEx->Samples.wValidBitsPerSample = _pwfx->wBitsPerSample;
    52. }
    53. } else {
    54. LOG_INFO ("unknown format 0x%04X", _pwfx->wFormatTag);
    55. break;
    56. }
    57. //
    58. size_t _FrameSize = (_pwfx->wBitsPerSample / 8) * _pwfx->nChannels;// 每帧长度(字节)
    59. _frame->channels = _pwfx->nChannels;
    60. _frame->channel_layout = av_get_default_channel_layout (_frame->channels);
    61. _frame->sample_rate = _pwfx->nSamplesPerSec;
    62. _frame->format = AV_SAMPLE_FMT_S16;
    63. //_frame->format = AV_SAMPLE_FMT_FLT;
    64. //
    65. if (FAILED (_r = _pAudioClient->Initialize (AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, _pwfx, nullptr))) {
    66. LOG_INFO ("_pAudioClient->Initialize failed %d", _r);
    67. break;
    68. }
    69. if (FAILED (_r = _pAudioClient->GetService (__uuidof(IAudioCaptureClient), (void**) &_pCaptureClient))) {
    70. LOG_INFO ("_pAudioClient->GetService failed %d", _r);
    71. break;
    72. }
    73. _liFirstFire.QuadPart = -_hnsDefaultDevicePeriod / 2; // negative means relative time
    74. LONG _lTimeBetweenFires = (LONG) _hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds
    75. if (!SetWaitableTimer (_hTimerWakeUp, &_liFirstFire, _lTimeBetweenFires, NULL, NULL, FALSE)) {
    76. LOG_INFO ("SetWaitableTimer failed %d", ::GetLastError ());
    77. break;
    78. }
    79. if (FAILED (_r = _pAudioClient->Start ())) {
    80. LOG_INFO ("_pAudioClient->Start failed %d", _r);
    81. break;
    82. }
    83. //
    84. HANDLE _waitArray [2] = { _hEventStop, _hTimerWakeUp };
    85. while (true) {
    86. DWORD _dwWaitResult = WaitForMultipleObjects (_countof (_waitArray), _waitArray, FALSE, INFINITE);
    87. if (WAIT_OBJECT_0 + 1 != _dwWaitResult)
    88. break;
    89. UINT32 _nNextPacketSize = 0;
    90. if (FAILED (_r = _pCaptureClient->GetNextPacketSize (&_nNextPacketSize)))
    91. break;
    92. if (_nNextPacketSize == 0)
    93. continue;
    94. //
    95. BYTE *_pData = nullptr;
    96. UINT32 _nNumFramesToRead = 0;
    97. DWORD _dwFlags = 0;
    98. if (FAILED (_r = _pCaptureClient->GetBuffer (&_pData, &_nNumFramesToRead, &_dwFlags, nullptr, nullptr))) {
    99. break;
    100. }
    101. if (_nNumFramesToRead == 0)
    102. continue;
    103. if (_frame->nb_samples != _nNumFramesToRead) {// * _pwfx->nChannels
    104. if (_frame->data [0])
    105. av_frame_unref (_frame);
    106. _frame->nb_samples = _nNumFramesToRead;// * _pwfx->nChannels
    107. av_frame_get_buffer (_frame, 1);
    108. }
    109. //
    110. if ((_dwFlags & AUDCLNT_BUFFERFLAGS_SILENT) > 0) {
    111. memset (_frame->data [0], 0, _nNumFramesToRead*_FrameSize);
    112. } else {
    113. ::CopyMemory (_frame->data [0], _pData, _nNumFramesToRead*_FrameSize);
    114. }
    115. // 此处已转码成功
    116. // _callback (_frame);
    117. _pCaptureClient->ReleaseBuffer (_nNumFramesToRead);
    118. }
    119. } while (false);
    120. //
    121. if (_pCaptureClient)
    122. _pCaptureClient->Release ();
    123. av_frame_free (&_frame);
    124. if (_pwfx)
    125. CoTaskMemFree (_pwfx);
    126. if (_pAudioClient)
    127. _pAudioClient->Release ();
    128. if (_pDevice)
    129. _pDevice->Release ();
    130. if (_pEnumerator)
    131. _pEnumerator->Release ();
    132. AvRevertMmThreadCharacteristics (_hTask);
    133. ::CloseHandle (_hTimerWakeUp);
    134. ::CloseHandle (_hEventStop);
    135. ::CloseHandle (_hEventStarted);

    程序结构

    对于播放器来说,只需要解码然后展示就行了

    1. #include <头文件>
    2. int main (int argc, char* argv[]) {
    3. // 初始化
    4. // 打开输入流
    5. new_thread {
    6. while (_run) {
    7. // 图像处理
    8. // 从摄像头输入流中读一个AVPacket
    9. // 将AVPacket解码为AVFrame
    10. // 根据实际需求考虑是否需要转换AVFrame的像素格式
    11. // 展示图片(SDL2或者其他界面库)
    12. }
    13. }
    14. new_thread {
    15. while (_run) {
    16. // 音频处理
    17. // 从麦克风输入流中读一个AVPacket
    18. // 将AVPacket解码为AVFrame
    19. // 根据实际需求考虑是否需要转换AVFrame的采样格式
    20. // 播放声音(SDL2或其他音频库)
    21. }
    22. }
    23. while (_run) {
    24. // wait...
    25. }
    26. // 关闭输入流
    27. return 0;
    28. }

    对于一个视频监控软件来说,由于需要一边读摄像头一边展示,还要一边存文件,但不用读声音了,所以架构就是把这两者合并起来

    1. #include <头文件>
    2. int main (int argc, char* argv[]) {
    3. // 初始化
    4. // 打开摄像头
    5. // 打开输出流
    6. new_thread {
    7. while (_run) {
    8. // 图像处理
    9. // 从摄像头输入流中读一个AVPacket
    10. // 将AVPacket解码为AVFrame
    11. // 根据实际需求考虑是否需要转换AVFrame的像素格式
    12. // 展示图片(SDL2或者其他界面库)
    13. // 将AVFrame编码为AVPacket
    14. // send一帧
    15. // 控制速度
    16. }
    17. }
    18. while (_run) {
    19. // wait...
    20. }
    21. // 关闭输出流
    22. // 关闭摄像头输入流
    23. return 0;
    24. }

  • 相关阅读:
    IDA软件为什么运行不起来
    微信小程序入门级
    Fisher辨别分析
    ORM概念
    JVM内存模型
    树莓派4B_OpenCv学习笔记19:OpenCV舵机云台物体追踪
    做大数据开发一定要知道几个重要的框架和工具及知识点
    【最新版】ChatGPT/GPT4科研应用与AI绘图论文写作(最新增加Claude3、Gemini、Sora、GPTs技术及AI领域中的集中大模型的最新技术)
    STM32收入高也好找工作,所以我学嵌入式linux终究是错付了吗
    Spring boot项目集成security
  • 原文地址:https://blog.csdn.net/yaningli/article/details/128031216