After reading through this example, you should have a basic grasp of:
1. The basic steps of pulling and pushing a video stream with OpenCV + GStreamer
2. The usage of several GStreamer plugins for image compositing
3. Video encoding/decoding with hardware acceleration on the TX2 NX, and tuning H.264 encoder parameters
4. How to raise a thread's priority on Linux
What I need to implement on the TX2 NX is: capture frames from the camera, blend another image onto those frames, and push the blended result to an RTSP server.
The blend has to preserve the overlay's transparency, but OpenCV's VideoWriter cannot write four-channel frames, and the hardware-accelerated compositing plugins cannot key out the transparent regions. It could be done with the plugin that keys out a given color in system memory (not video memory, so no hardware acceleration), but that is far too slow.
So the only practical approach is to pull the camera frames with VideoCapture, blend the overlay image onto them manually, and then push the result to the RTSP server with VideoWriter (a small sketch of the blend is shown below).
To improve push throughput, the priority of the push thread is raised.
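As a reference, here is a minimal sketch of the manual weighted blend. It follows the 0.4/0.6 per-channel formula that appears as a comment further down in run_capture(); the helper name blend_overlay and the assumption that both Mats are same-sized CV_8UC3 (BGR) frames are mine, and a real implementation would additionally skip the overlay's transparent pixels.

// Hypothetical helper: blend `overlay` onto `frame` in place with fixed weights.
// Assumes both Mats are CV_8UC3 (BGR) and the same size.
#include <opencv2/opencv.hpp>

static void blend_overlay(cv::Mat& frame, const cv::Mat& overlay) {
    for (int row = 0; row < frame.rows; ++row) {
        uchar* data1 = frame.ptr<uchar>(row);
        const uchar* data2 = overlay.ptr<uchar>(row);
        // Walk every byte of the row: 3 channels (B, G, R) per pixel.
        for (int col = 0; col < frame.cols * 3; ++col) {
            // 40% overlay + 60% camera frame, per channel
            data1[col] = cv::saturate_cast<uchar>(data2[col] * 0.4 + data1[col] * 0.6);
        }
    }
}

For a full-frame blend with no transparency handling, cv::addWeighted(overlay, 0.4, frame, 0.6, 0.0, frame) does the same thing in one call.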
The overall workflow:
First set up the RTSP server side; the rtsp-simple-server project makes it easy to deploy one on the LAN.
After installing GStreamer, since the rtspclientsink element is used, the RTSP plugin package also needs to be installed:
// sudo apt-get update
// sudo apt-get upgrade
// sudo apt install gstreamer1.0-rtsp
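Before building the pipelines, it is also worth confirming that the OpenCV build on the device was compiled with the GStreamer backend, otherwise VideoCapture/VideoWriter will fail to open these pipelines. A minimal check (assuming OpenCV 4.x, where cv::videoio_registry::hasBackend is available) could look like this:

#include <iostream>
#include <opencv2/videoio.hpp>
#include <opencv2/videoio/registry.hpp>

int main() {
    // Ask the videoio registry whether the GStreamer backend was compiled in.
    if (cv::videoio_registry::hasBackend(cv::CAP_GSTREAMER)) {
        std::cout << "OpenCV has GStreamer support" << std::endl;
    } else {
        std::cout << "No GStreamer backend - rebuild OpenCV with -DWITH_GSTREAMER=ON" << std::endl;
    }
    return 0;
}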
Below is the process of pushing the stream with VideoWriter.
// cpp implementation
#include "rtsp_push_stream.h"


static int get_thread_policy(pthread_attr_t& attr) {
    int policy;
    int rs = pthread_attr_getschedpolicy(&attr, &policy);
    assert(rs == 0);
    switch (policy) {
        case SCHED_FIFO:
            cout << "policy = SCHED_FIFO" << endl;
            break;

        case SCHED_RR:
            cout << "policy = SCHED_RR" << endl;
            break;

        case SCHED_OTHER:
            cout << "policy = SCHED_OTHER" << endl;
            break;

        default:
            cout << "policy = UNKNOWN" << endl;
            break;
    }

    return policy;
}

static void show_thread_priority(pthread_attr_t& attr, int policy) {
    int priority = sched_get_priority_max(policy);
    assert(priority != -1);
    cout << "max_priority = " << priority << endl;

    priority = sched_get_priority_min(policy);
    assert(priority != -1);
    cout << "min_priority = " << priority << endl;
}

static int get_thread_priority(pthread_attr_t& attr) {
    struct sched_param param;

    int rs = pthread_attr_getschedparam(&attr, &param);
    assert(rs == 0);
    cout << "priority = " << param.sched_priority << endl;

    return param.sched_priority;
}

static void set_thread_policy(pthread_attr_t& attr, int policy) {
    int rs = pthread_attr_setschedpolicy(&attr, policy);
    assert(rs == 0);
    get_thread_policy(attr);
}

RtspPushStream::RtspPushStream() : active_(true) {}

RtspPushStream::~RtspPushStream() {}

void RtspPushStream::start() {
    string appsrcpipeline =
        "appsrc ! video/x-raw, format=BGR ! queue ! videoconvert ! video/x-raw,format=RGBA ! nvvidconv ! nvv4l2h264enc "
        "! h264parse ! qtmux ! filesink location={filename} sync=false";

    // Using rtspclientsink requires installing the plugin package:
    // sudo apt-get update
    // sudo apt-get upgrade
    // sudo apt install gstreamer1.0-rtsp

    std::string pipeline_useglvideomixer =
        "appsrc "
        "! video/x-raw, format=BGR "
        "! videoconvert "
        "! video/x-raw,format=(string)RGBA, width=(int)1024, height=(int)600 "
        "! queue2 "
        "! alpha method=blue "
        "! glvideomixer name=compos sink_0::zorder=1 sink_0::alpha=0.85 sink_1::alpha=1 sink_1::zorder=0 "
        "sink_1::width=1024 sink_1::height=600 "
        "! nvvidconv "
        "! video/x-raw(memory:NVMM), format=(string)I420, width=(int)1024, height=(int)600 "
        "! nvv4l2h264enc "
        "! rtspclientsink location=rtsp://192.168.20.99:8554/my_pipeline "
        "nvarguscamerasrc "
        "! video/x-raw(memory:NVMM), format=(string)NV12, width=(int)1640, height=(int)1232, framerate=(fraction)25/1 "
        "! queue2 "
        "! nvvidconv left=0 right=1640 top=136 bottom=1096 "
        "! compos. ";

    // The two inputs to nvcompositor must have the same pixel format and memory type,
    // and the transparent part of the overlay image has to be filled with white (255,255,255).
    std::string pipeline_nvcompositor =
        "appsrc "
        "! video/x-raw, format=BGR "
        "! videoconvert "
        "! video/x-raw,format=(string)RGBA, width=(int)1024, height=(int)600 "
        "! nvvidconv "
        "! queue2 "
        "! nvcompositor name=compos sink_0::zorder=1 sink_0::alpha=0.5 "
        "sink_1::alpha=1 "
        "sink_1::zorder=0 sink_1::width=1024 sink_1::height=600 "
        "! nvvidconv "
        "! nvv4l2h264enc "
        "! rtspclientsink location=rtsp://192.168.20.99:8554/my_pipeline "
        "nvarguscamerasrc "
        "! video/x-raw(memory:NVMM), format=(string)NV12, width=(int)1640, height=(int)1232, framerate=(fraction)25/1 "
        "! nvvidconv left=0 right=1640 top=136 bottom=1096 "
        "! video/x-raw,format=(string)RGBA, width=(int)1024, height=(int)600 "
        "! videobalance brightness=0.3 "
        "! nvvidconv "
        "! queue2 "
        "! compos. ";

    video_writer_.open(pipeline_nvcompositor, cv::CAP_GSTREAMER, 0, 25, cv::Size(1024, 600));
    mat_ = cv::imread("test.jpg");
    write_thread_ = make_shared<thread>(&RtspPushStream::run, this);
    write_thread_->join();
}

void RtspPushStream::run() {
    int id = 0;
    while (active_) {
        space_mat_ = cv::Mat(600, 1024, CV_8UC3, cv::Scalar(0, 0, 0));

        video_writer_.write(space_mat_);

        std::this_thread::sleep_for(std::chrono::milliseconds(3));
    }
}

void RtspPushStream::end() {
    active_ = false;
    // LOG::info("RtspPushStream::end()!");
}

void RtspPushStream::start_capture() {
    std::string pipeline_camera_capture =
        "nvarguscamerasrc "
        "! video/x-raw(memory:NVMM), format=(string)NV12, width=(int)1640, "
        "height=(int)1232, framerate=(fraction)30/1 "
        "! nvvidconv left=0 right=1640 top=136 bottom=1096 "
        "! video/x-raw,format=(string)I420, width=(int)1024, height=(int)600 "
        "! videoconvert "
        "! video/x-raw,format=(string)BGR "
        "! appsink";

    video_capture_.open(pipeline_camera_capture, cv::CAP_GSTREAMER);
    if (!video_capture_.isOpened()) {
        // LOG::error("Failed to open VideoCapture");
        return;
    }
    mat_ = cv::imread("test.jpg");
    std::string pipeline_video_writer =
        "appsrc "
        "! video/x-raw, format=BGR "
        "! videoconvert "
        "! nvvidconv "
        "! nvv4l2h264enc iframeinterval=10 idrinterval=32 "  // Raise the H.264 I-frame and IDR-frame frequency to reduce how often and how long WebRTC playback stutters
        "! rtspclientsink location=rtsp://192.168.20.99:8554/my_pipeline";

    video_writer_.open(pipeline_video_writer, cv::CAP_GSTREAMER, 0, 20, cv::Size(1024, 600));
    cap_thread_ = make_shared<thread>(&RtspPushStream::run_capture, this);
    cap_thread_->detach();
}

void RtspPushStream::run_capture() {
    pthread_attr_t attr;
    struct sched_param sched;
    int rs;

    rs = pthread_attr_init(&attr);
    assert(rs == 0);

    int policy = get_thread_policy(attr);

    cout << "Show current configuration of priority" << endl;
    show_thread_priority(attr, policy);

    cout << "Show priority of current thread" << endl;
    int priority = get_thread_priority(attr);

    set_thread_policy(attr, SCHED_FIFO);  // or SCHED_RR
    cout << "Restore current policy" << endl;
    // set_thread_policy(attr, policy);
    struct sched_param param_;
    param_.sched_priority = sched_get_priority_max(SCHED_FIFO);  // or SCHED_RR
    // Raise this thread's priority here (real-time policies need root or CAP_SYS_NICE)
    cout << "param_.sched_priority = " << param_.sched_priority << endl;
    pthread_setschedparam(pthread_self(), SCHED_FIFO, &param_);
    cout << "pthread_setschedparam success" << endl;

    rs = pthread_attr_destroy(&attr);
    assert(rs == 0);
    cout << "start capture!!!!!!!!!" << endl;

    int id = 0;
    cv::Mat src;
    while (active_) {
        // Read one frame from the camera, i.e. pull the stream
        video_capture_ >> src;
        // Apply your custom blending algorithm here
        // blender
        // Alpha blending of the two images, mixing the RGB channels pixel by pixel:
        // data1[add_col]     = data2[add_col]     * 0.4 + data1[add_col]     * 0.6;
        // data1[add_col + 1] = data2[add_col + 1] * 0.4 + data1[add_col + 1] * 0.6;
        // data1[add_col + 2] = data2[add_col + 2] * 0.4 + data1[add_col + 2] * 0.6;

        // After blending, stamp the current time onto src with OpenCV so the output is easy to check
        auto cur_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
        std::stringstream ss;
        ss << std::put_time(std::localtime(&cur_time), "%Y-%m-%d %H:%M:%S");
        std::string str_time = ss.str();
        cv::putText(src, str_time, cv::Point(830, 20), cv::HersheyFonts::FONT_ITALIC, 0.5, cv::Scalar(0, 0, 0), 1, 4);
        video_writer_.write(src);
        src.release();
    }

    // LOG::info("RtspPushStream::run_capture() end!");
}

// Header file
#ifndef RTSPPUSHSTREAM_H
#define RTSPPUSHSTREAM_H
#include <pthread.h>
#include <sched.h>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <list>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include <opencv2/opencv.hpp>

using namespace std;

class RtspPushStream {
public:
    RtspPushStream();
    ~RtspPushStream();

    void start();
    void write_image(cv::Mat image);

    void run();
    void start_capture();
    void run_capture();
    void end();

private:
    mutex mutex_;
    cv::VideoWriter video_writer_;
    list<cv::Mat> img_mats_;
    bool active_;
    cv::Mat mat_;
    cv::Mat space_mat_;
    shared_ptr<thread> cap_thread_;
    shared_ptr<thread> write_thread_;
    cv::VideoCapture video_capture_;
    std::vector<cv::Mat> push_frames_;
    std::mutex push_frames_lock_;
};
#endif
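For completeness, here is a minimal, hypothetical driver (not part of the original code) showing how the class above might be used; it simply assumes the header and cpp file are built into the same executable:

// main.cpp - hypothetical usage sketch
#include "rtsp_push_stream.h"

int main() {
    RtspPushStream streamer;
    // Opens the camera via VideoCapture, opens the RTSP push pipeline via VideoWriter,
    // and starts the detached capture/blend/push thread with elevated priority.
    streamer.start_capture();

    // Keep the process alive while the detached thread pushes frames;
    // a real application would run its own work here instead.
    std::this_thread::sleep_for(std::chrono::minutes(10));

    streamer.end();  // sets active_ = false so run_capture() exits its loop
    return 0;
}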
Once the stream is being pushed, you can watch the result by pulling it with any RTSP-capable player, such as PotPlayer (64-bit), VLC, and so on.