• FFmpeg: extracting video parameters, clipping, splicing and merging videos, frame extraction, and more


    This post walks through extracting video parameters with FFmpeg, plus clipping, splicing, and merging videos and pulling individual frames.
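
    The parameter-extraction part reduces to probing the container. A minimal sketch, assuming the same FFmpeg 3.x-era build as the code below and a file with at least one video stream; error handling is trimmed to early returns:

    #include <cstdio>
    extern "C" {
    #include "libavformat/avformat.h"
    #include "libavutil/rational.h"
    }

    // Print the basic parameters of the first video stream.
    static void printVideoParams(const char* path)
    {
        av_register_all(); // required on FFmpeg < 4.0; deprecated no-op later
        AVFormatContext* fmt = nullptr;
        if (avformat_open_input(&fmt, path, nullptr, nullptr) < 0)
            return;
        if (avformat_find_stream_info(fmt, nullptr) < 0) {
            avformat_close_input(&fmt);
            return;
        }
        for (unsigned i = 0; i < fmt->nb_streams; i++) {
            AVStream* st = fmt->streams[i];
            if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;
            printf("duration: %.2f s\n", fmt->duration / (double)AV_TIME_BASE);
            printf("resolution: %dx%d\n", st->codecpar->width, st->codecpar->height);
            printf("fps: %.2f\n", av_q2d(st->avg_frame_rate));
            break;
        }
        avformat_close_input(&fmt);
    }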

    Getting the video cover (thumbnail) image

    
    #ifndef _BUFFER_CONTAINER_H_
    #define _BUFFER_CONTAINER_H_
    #include <cstring> // memset / memcpy
    
    
    
    template <typename T>
    class BufferContainer
    {
    public:
    
        /**
         * @brief Construct an empty BufferContainer (default constructor)
         */
        BufferContainer()
        {
            m_buffer = nullptr;
            m_length = 0;
        }
        /**
         * @brief Construct a BufferContainer that deep-copies `length` elements
         *
         * @param t1      pointer to the source elements
         * @param length  number of elements of type T
         */
        BufferContainer(const T* t1, int length)
        {
            // The original took `const T&` and memcpy'd from &t1, which only
            // worked by accident; a pointer makes the contract explicit.
            if (t1 && length > 0)
            {
                m_buffer = new T[length];
                m_length = length;
                memcpy(m_buffer, t1, length * sizeof(T));
            }
        }
        /**
         * @brief Replace the stored buffer with a deep copy of `pData`
         */
        void setData(const T* pData, int length)
        {
            // Release any previously held buffer before copying the new one.
            if (m_buffer) {
                delete[] m_buffer;
                m_buffer = nullptr;
                m_length = 0;
            }
            if (!pData || length <= 0) {
                return;
            }
            m_buffer = new T[length];
            m_length = length;
            memcpy(m_buffer, pData, length * sizeof(T));
        }
    
        /**
         * @brief Copy constructor (deep copy)
         *
         * @param other
         */
        BufferContainer(const BufferContainer& other)
        {
            if (other.m_buffer && other.m_length > 0)
            {
                m_buffer = new T[other.m_length];
                m_length = other.m_length;
                memcpy(m_buffer, other.m_buffer, other.m_length * sizeof(T));
            }
        }
        /**
         * @brief Destructor: releases the owned buffer
         */
        ~BufferContainer()
        {
            if (m_buffer)
            {
                delete[] m_buffer;
                m_buffer = nullptr;
                m_length = 0;
            }
        }
        /**
         * @brief Copy-assignment operator (deep copy)
         *
         * @param other
         * @return BufferContainer&
         */
        BufferContainer& operator=(const BufferContainer& other)
        {
            // Guard against self-assignment before freeing the buffer.
            if (this != &other)
            {
                delete[] m_buffer;
                m_buffer = nullptr;
                m_length = 0;
                if (other.m_buffer && other.m_length > 0)
                {
                    m_buffer = new T[other.m_length];
                    m_length = other.m_length;
                    memcpy(m_buffer, other.m_buffer, sizeof(T) * other.m_length);
                }
            }
            return *this;
        }
    
        /**
         * @brief Return the raw data pointer
         *
         * @return T*
         */
        T* data()
        {
            return m_buffer;
        }
        /**
         * @brief Return the number of stored elements
         *
         * @return int
         */
        int length() const
        {
            return m_length;
        }
    
    private:
        T *m_buffer = nullptr;
        int m_length = 0;
    };
    
    #endif //_BUFFER_CONTAINER_H_
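
    A quick usage sketch of the container; the header file name is an assumption based on the include guard:

    #include "buffer_container.h" // assumed file name for the header above

    int main()
    {
        unsigned char raw[4] = {0x11, 0x22, 0x33, 0x44};
        BufferContainer<unsigned char> a(raw, 4); // deep-copies the 4 bytes
        BufferContainer<unsigned char> b = a;     // copy constructor, deep copy
        BufferContainer<unsigned char> c;
        c = b;                                    // copy assignment, deep copy
        c.setData(raw, 2);                        // replaces c's buffer with 2 bytes
        return c.length();                        // exit code 2, just for illustration
    }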
    
    
    
    // Excerpt from the VideoEditerBase implementation; it relies on the
    // libavformat, libavcodec, libavutil/imgutils and libswscale headers
    // (see the includes in videoeditermp4.cpp below).
    BufferContainer<unsigned char> VideoEditerBase::thumbnail(const std::string& filePath)
    {
    	av_register_all(); // required on FFmpeg < 4.0; deprecated no-op on newer builds
    	BufferContainer<unsigned char> result;
    
    	AVFormatContext* fmtContext = nullptr;
    	if (avformat_open_input(&fmtContext, filePath.c_str(), nullptr, nullptr) < 0) {
    		return result;
    	}
    	if (avformat_find_stream_info(fmtContext, nullptr) < 0) {
    		avformat_close_input(&fmtContext);
    		return result;
    	}
    	int nStreamIndex = -1;
    	AVCodecParameters* codecParameters = nullptr;
    	for (int i = 0; i < fmtContext->nb_streams; i++) {
    		if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
    			nStreamIndex = i;
    			codecParameters = fmtContext->streams[i]->codecpar;
    			break;
    		}
    	}
    	if (nStreamIndex == -1) {
    		avformat_close_input(&fmtContext);
    		return result;
    	}
    	AVCodec* codec = avcodec_find_decoder(codecParameters->codec_id);
    
    	if (!codec) {
    		avformat_close_input(&fmtContext);
    		return result;
    	}
    	AVCodecContext* codecContext = avcodec_alloc_context3(codec);
    	if (!codecContext) {
    		// failed to allocate the decoder context
    		avformat_close_input(&fmtContext);
    		return result;
    	}
    	if (avcodec_parameters_to_context(codecContext, codecParameters) < 0) {
    		// failed to copy codec parameters to the decoder context
    		avcodec_free_context(&codecContext);
    		avformat_close_input(&fmtContext);
    		return result;
    	}
    	if (avcodec_open2(codecContext, codec, nullptr) < 0) {
    		// failed to open the decoder
    		avcodec_free_context(&codecContext);
    		avformat_close_input(&fmtContext);
    		return result;
    	}
    	AVPacket packet;
    	av_init_packet(&packet);
    	packet.data = nullptr;
    	packet.size = 0;
    	AVFrame* frame = av_frame_alloc();
    	while (av_read_frame(fmtContext, &packet) >= 0) {
    		if (packet.stream_index != nStreamIndex) {
    			av_packet_unref(&packet); // release non-video packets too
    			continue;
    		}
    		// Decode with the send/receive API. (The original also called the
    		// deprecated avcodec_decode_video2() on the same packet, which
    		// decoded everything twice; that call is dropped.)
    		int ret = avcodec_send_packet(codecContext, &packet);
    		av_packet_unref(&packet);
    		if (ret < 0) {
    			continue;
    		}
    		if (avcodec_receive_frame(codecContext, frame) < 0) {
    			// The decoder may need more packets before emitting a frame.
    			continue;
    		}
    		// Use the first decoded key frame as the cover image.
    		if (!frame->key_frame) {
    			continue;
    		}
    		AVFrame* rgbFrame = av_frame_alloc();
    		if (rgbFrame) {
    			rgbFrame->format = AV_PIX_FMT_RGB24;
    			rgbFrame->width = frame->width;
    			rgbFrame->height = frame->height;

    			int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_RGB24, frame->width, frame->height, 1);
    			uint8_t* buffer = new uint8_t[bufferSize];
    			av_image_fill_arrays(rgbFrame->data, rgbFrame->linesize, buffer, AV_PIX_FMT_RGB24, frame->width, frame->height, 1);

    			SwsContext* swsContext = sws_getContext(frame->width, frame->height, codecContext->pix_fmt,
    				frame->width, frame->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, nullptr, nullptr, nullptr);
    			if (swsContext) {
    				sws_scale(swsContext, frame->data, frame->linesize, 0, frame->height, rgbFrame->data, rgbFrame->linesize);
    				sws_freeContext(swsContext);

    				// Copy the RGB rows into a tightly packed output buffer
    				// (linesize may include padding beyond width * 3).
    				int outputBufferSize = rgbFrame->width * rgbFrame->height * 3;
    				unsigned char* outputBuffer = new unsigned char[outputBufferSize];
    				for (int i = 0; i < rgbFrame->height; i++) {
    					memcpy(outputBuffer + i * rgbFrame->width * 3, rgbFrame->data[0] + i * rgbFrame->linesize[0], rgbFrame->width * 3);
    				}
    				result.setData(outputBuffer, outputBufferSize);
    				delete[] outputBuffer;
    			}

    			delete[] buffer;
    			av_frame_free(&rgbFrame);
    		}
    		break; // one key frame is enough for the cover; stop reading
    	}
    
    	av_frame_free(&frame);
    	avcodec_free_context(&codecContext);
    	avformat_close_input(&fmtContext);
    	return result;
    
    }
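
    The returned container holds tightly packed RGB24 pixels but not the frame's dimensions, so a caller has to probe the width and height separately. A minimal sketch for inspecting the result by dumping it as a binary PPM; file names and dimensions are placeholders, and `editer` is assumed to be some VideoEditerBase-derived instance:

    #include <cstdio>

    // Write a packed RGB24 buffer as a binary PPM file for quick inspection.
    static bool saveAsPpm(const unsigned char* rgb, int width, int height, const char* path)
    {
        FILE* fp = fopen(path, "wb");
        if (!fp) return false;
        fprintf(fp, "P6\n%d %d\n255\n", width, height);
        fwrite(rgb, 1, (size_t)width * height * 3, fp);
        fclose(fp);
        return true;
    }

    // Usage (dimensions must come from probing the file, since thumbnail()
    // does not report them):
    //   BufferContainer<unsigned char> img = editer.thumbnail("input.mp4");
    //   if (img.length() > 0) saveAsPpm(img.data(), 1920, 1080, "cover.ppm");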
    
    

    Video clipping and merging

    #include "videoeditermp4.h"
    #include <QDebug> // qDebug() is used in the run loops below
    extern "C" {
    #include "libavformat/avformat.h"
    #include "libavcodec/avcodec.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/opt.h"
    #include "libswresample/swresample.h"
    #include "libswscale/swscale.h"
    }
    VideoEditerMp4::VideoEditerMp4()
    {
    
    }
    VideoEditerMp4::~VideoEditerMp4() {
        stop();
    }
    void VideoEditerMp4::startClip(std::string input, std::string output, int64_t st, int64_t et)
    {
        m_vecInputPaths.clear();
        m_vecInputPaths.push_back(input);
        m_outputPath = output;
        m_startTime = st;
        m_endTime = et;
        m_bStop = false; // reset in case a previous run was stopped
        m_pThread = new std::thread(&VideoEditerMp4::runClip, this);
        m_pThread->detach();
    }
    
    void VideoEditerMp4::startMerge(std::vector<std::string> inputs, std::string output)
    {
    	m_vecInputPaths = inputs;
    	m_outputPath = output;
    	m_bStop = false; // reset in case a previous run was stopped
    	m_pThread = new std::thread(&VideoEditerMp4::runMerge, this);
    	m_pThread->detach();
    }
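
    A minimal sketch of driving the clip operation; the state-callback wiring lives in the class header, which is not shown here, so a sleep stands in for waiting on the FINISH/FAIL state:

    #include "videoeditermp4.h"
    #include <chrono>
    #include <thread>

    int main()
    {
        VideoEditerMp4 editer;

        // Cut the span between second 5 and second 15 out of input.mp4.
        // File names are placeholders; times are in seconds, matching the
        // pts * av_q2d(timeBase) comparisons in runClip().
        editer.startClip("input.mp4", "clip.mp4", 5, 15);

        // The worker thread is detached, so a real caller would block on the
        // state callback instead of sleeping.
        std::this_thread::sleep_for(std::chrono::seconds(30));
        return 0;
    }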
    
    void VideoEditerMp4::runClip()
    {
        stateCallBack(RUNNING);
    
        if (m_vecInputPaths.empty() || m_outputPath.empty()) {
    
            stateCallBack(FAIL);
            return;
        }
    AVFormatContext* fmtContext = avformat_alloc_context();
    if (avformat_open_input(&fmtContext, m_vecInputPaths.front().c_str(), nullptr, nullptr) < 0) {
        // failed to open the input file
        stateCallBack(FAIL);
        return;
    }
    if (avformat_find_stream_info(fmtContext, nullptr) < 0) {
        // failed to read stream info from the input file
        avformat_close_input(&fmtContext);
        stateCallBack(FAIL);
        return;
    }
    
    
    int videoStreamIndex = -1;
    int audioStreamIndex = -1;
    for (int i = 0; i < (int)fmtContext->nb_streams; i++) {
        if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStreamIndex == -1) {
            videoStreamIndex = i;
        }
        if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStreamIndex == -1) {
            audioStreamIndex = i;
        }
    }
    if (videoStreamIndex == -1) {
        // The time-base math below requires a video stream.
        avformat_close_input(&fmtContext);
        stateCallBack(FAIL);
        return;
    }

    AVFormatContext* output_format_ctx = nullptr;
    if (avformat_alloc_output_context2(&output_format_ctx, nullptr, nullptr, m_outputPath.c_str()) < 0)
    {
        avformat_close_input(&fmtContext);
        stateCallBack(FAIL);
        return;
    }
    // Create one output stream per input stream and copy its parameters.
    for (int i = 0; i < (int)fmtContext->nb_streams; i++) {
        AVStream* stream = avformat_new_stream(output_format_ctx, nullptr);
        if (!stream) {
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
        // avcodec_parameters_copy() is all a remux needs; the deprecated
        // avcodec_copy_context() call on stream->codec was dropped.
        if (avcodec_parameters_copy(stream->codecpar, fmtContext->streams[i]->codecpar) < 0) {
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
        // Let the muxer choose its own codec tag; a tag copied from a
        // different container can make avformat_write_header() fail.
        stream->codecpar->codec_tag = 0;
    }
    // Open the output file.
    if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&output_format_ctx->pb, m_outputPath.c_str(), AVIO_FLAG_WRITE) < 0) {
            // failed to open the output file
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
    }
    
    
    // Write the container header.
    if (avformat_write_header(output_format_ctx, nullptr) < 0) {
        // failed to write the output header
        avformat_close_input(&fmtContext);
        avformat_free_context(output_format_ctx);
        stateCallBack(FAIL);
        return;
    }
    
    
    int64_t videoIndex = 0;
    int64_t audioIndex = 0;

    AVRational timeBase = fmtContext->streams[videoStreamIndex]->time_base;
    // Audio packets carry timestamps in the audio stream's time base; the
    // original reused the video time base for both, which skews audio timing.
    AVRational audioTimeBase = (audioStreamIndex != -1)
        ? fmtContext->streams[audioStreamIndex]->time_base
        : timeBase;
    
    
    AVPacket packet;
    while (true)
    {
        // Check whether a stop was requested.
        if (m_bStop) {
            break;
        }
        if (av_read_frame(fmtContext, &packet) < 0) {
            break; // end of file
        }
        if (audioStreamIndex != -1 && packet.stream_index == audioStreamIndex) {
            double sec = packet.pts * av_q2d(audioTimeBase);
            if (sec >= m_startTime && sec <= m_endTime) {
                if (audioIndex % m_interval == 0) {
                    // Rescale into the matching *audio* output stream's time
                    // base (the original wrote into the video stream's).
                    AVRational outTb = output_format_ctx->streams[audioStreamIndex]->time_base;
                    int64_t startPts = (int64_t)(m_startTime / av_q2d(audioTimeBase));
                    if (audioIndex == 0) {
                        packet.pts = 0;
                        packet.dts = 0;
                    } else {
                        packet.pts = av_rescale_q(packet.pts - startPts, audioTimeBase, outTb);
                        packet.dts = av_rescale_q(packet.dts - startPts, audioTimeBase, outTb);
                    }
                    packet.duration = av_rescale_q(packet.duration, audioTimeBase, outTb);
                    av_interleaved_write_frame(output_format_ctx, &packet);
                    audioIndex++;
                    av_packet_unref(&packet); // was unreachable after continue before
                    continue;
                }
                audioIndex++;
            }
        }
    
        if (packet.stream_index == videoStreamIndex) {
            double sec = packet.pts * av_q2d(timeBase);
            if (sec >= m_startTime && sec <= m_endTime) {
                qDebug() << "============>sec:" << sec << "<==============";
                // Note: dropping packets with videoIndex % m_interval only yields
                // decodable output when the skipped packets are not reference
                // frames (e.g. an all-intra stream, or m_interval == 1).
                if (videoIndex % m_interval == 0) {
                    AVRational outTb = output_format_ctx->streams[videoStreamIndex]->time_base;
                    int64_t startPts = (int64_t)(m_startTime / av_q2d(timeBase));
                    packet.pts = av_rescale_q(packet.pts - startPts, timeBase, outTb);
                    packet.dts = av_rescale_q(packet.dts - startPts, timeBase, outTb);
                    packet.duration = av_rescale_q(packet.duration, timeBase, outTb);
                    // av_interleaved_write_frame() keeps audio/video packets
                    // properly ordered in the output; av_write_frame() does not.
                    av_interleaved_write_frame(output_format_ctx, &packet);
                    videoIndex++;
                    av_packet_unref(&packet);
                    continue;
                }
                videoIndex++;
            }
        }
    
            av_packet_unref(&packet);
    
    
        // Block here while paused.
            {
                std::unique_lock<std::mutex> lock(m_mutex);
                cv.wait(lock, [this]{
                    if (m_bPaused) {
                        stateCallBack(PAUSE);
                    }
                    return !m_bPaused; });
            }
        }
    
    // Write the container trailer.
    av_write_trailer(output_format_ctx);
    // Close the input and release the output context.
    avformat_close_input(&fmtContext);
    if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&output_format_ctx->pb);
    }
    avformat_free_context(output_format_ctx);
        if (m_bStop) {
            stateCallBack(STOP);
            return;
        }
        stateCallBack(FINISH);
    }
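
    The loop above leans on av_rescale_q() for every timestamp conversion. A small standalone illustration of that arithmetic, with made-up values:

    #include <cstdio>
    extern "C" {
    #include "libavutil/mathematics.h" // av_rescale_q
    #include "libavutil/rational.h"
    }

    int main()
    {
        // A pts of 180000 in a 1/90000 time base is 2.0 seconds.
        AVRational in  = {1, 90000};
        AVRational out = {1, 1000}; // milliseconds, e.g. an MP4 track time base

        // av_rescale_q(v, in, out) computes v * in / out with 64-bit rounding,
        // so 180000 * (1/90000) / (1/1000) = 2000.
        int64_t pts = av_rescale_q(180000, in, out);
        printf("%lld\n", (long long)pts); // prints 2000
        return 0;
    }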
    
    void VideoEditerMp4::runMerge()
    {
        stateCallBack(RUNNING);
    
        if (m_vecInputPaths.empty() || m_outputPath.empty()) {
    
            stateCallBack(FAIL);
            return;
        }
    AVFormatContext* fmtContext = avformat_alloc_context();
    if (avformat_open_input(&fmtContext, m_vecInputPaths.front().c_str(), nullptr, nullptr) < 0) {
        // failed to open the first input file
        stateCallBack(FAIL);
        return;
    }
    if (avformat_find_stream_info(fmtContext, nullptr) < 0) {
        // failed to read stream info from the first input file
        avformat_close_input(&fmtContext);
        stateCallBack(FAIL);
        return;
    }
    
    
    int videoStreamIndex = -1;
    int audioStreamIndex = -1;
    for (int i = 0; i < (int)fmtContext->nb_streams; i++) {
        if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStreamIndex == -1) {
            videoStreamIndex = i;
        }
        if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStreamIndex == -1) {
            audioStreamIndex = i;
        }
    }
    if (videoStreamIndex == -1) {
        // The duration/time-base math below requires a video stream.
        avformat_close_input(&fmtContext);
        stateCallBack(FAIL);
        return;
    }

    AVFormatContext* output_format_ctx = nullptr;
    if (avformat_alloc_output_context2(&output_format_ctx, nullptr, nullptr, m_outputPath.c_str()) < 0)
    {
        avformat_close_input(&fmtContext);
        stateCallBack(FAIL);
        return;
    }
    // Create one output stream per input stream and copy its parameters.
    for (int i = 0; i < (int)fmtContext->nb_streams; i++) {
        AVStream* stream = avformat_new_stream(output_format_ctx, nullptr);
        if (!stream) {
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
        // avcodec_parameters_copy() is all a remux needs; the deprecated
        // avcodec_copy_context() call on stream->codec was dropped.
        if (avcodec_parameters_copy(stream->codecpar, fmtContext->streams[i]->codecpar) < 0) {
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
        // Let the muxer choose its own codec tag; a tag copied from a
        // different container can make avformat_write_header() fail.
        stream->codecpar->codec_tag = 0;
    }
    // Open the output file.
    if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&output_format_ctx->pb, m_outputPath.c_str(), AVIO_FLAG_WRITE) < 0) {
            // failed to open the output file
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
    }
    
    
    // Write the container header.
    if (avformat_write_header(output_format_ctx, nullptr) < 0) {
        // failed to write the output header
        avformat_close_input(&fmtContext);
        avformat_free_context(output_format_ctx);
        stateCallBack(FAIL);
        return;
    }
    
    
    int64_t videoIndex = 0;
    int64_t audioIndex = 0;

    // Time bases of the first input. The loop below assumes every input has
    // the same stream layout and time bases, which a remux-style merge
    // requires anyway since packets are copied without re-encoding.
    AVRational timeBase = fmtContext->streams[videoStreamIndex]->time_base;
    AVRational audioTimeBase = (audioStreamIndex != -1)
        ? fmtContext->streams[audioStreamIndex]->time_base
        : timeBase;

    int64_t currentPts = 0; // accumulated duration of the files already written
    AVPacket packet;
    for (size_t k = 0; k < m_vecInputPaths.size(); k++) {
        // Check whether a stop was requested.
        if (m_bStop) {
            break;
        }

        // (Re)open the k-th input; the context opened above is closed first,
        // so the first file is simply opened a second time.
        avformat_close_input(&fmtContext);
        if (avformat_open_input(&fmtContext, m_vecInputPaths.at(k).c_str(), nullptr, nullptr) < 0) {
            // failed to open this input file
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
        if (avformat_find_stream_info(fmtContext, nullptr) < 0) {
            // failed to read stream info from this input file
            avformat_close_input(&fmtContext);
            avformat_free_context(output_format_ctx);
            stateCallBack(FAIL);
            return;
        }
    
    
        int videoStreamIndex = -1;
        int audioStreamIndex = -1;
        for (int i = 0; i < (int)fmtContext->nb_streams; i++) {
            if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStreamIndex == -1) {
                videoStreamIndex = i;
            }
            if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStreamIndex == -1) {
                audioStreamIndex = i;
            }
        }
        if (videoStreamIndex == -1) {
            break; // an input without video cannot be merged by this code
        }
        // (The pts offset is now accumulated *after* each file is written; see
        // the end of this loop. The original added the *current* file's
        // duration up front, which shifted every file by the wrong amount.)
        while (true)
        {
            // Check whether a stop was requested.
            if (m_bStop) {
                break;
            }
            if (av_read_frame(fmtContext, &packet) < 0) {
                break; // end of this input file
            }
            if (audioStreamIndex != -1 && packet.stream_index == audioStreamIndex) {
                if (audioIndex % m_interval == 0) {
                    AVRational outTb = output_format_ctx->streams[audioStreamIndex]->time_base;
                    // currentPts is kept in the video time base, so rescale the
                    // offset into audio units before applying it.
                    int64_t audioOffset = av_rescale_q(currentPts, timeBase, audioTimeBase);
                    if (audioIndex == 0) {
                        packet.pts = 0;
                        packet.dts = 0;
                    } else {
                        packet.pts = av_rescale_q(packet.pts + audioOffset, audioTimeBase, outTb);
                        packet.dts = av_rescale_q(packet.dts + audioOffset, audioTimeBase, outTb);
                    }
                    packet.duration = av_rescale_q(packet.duration, audioTimeBase, outTb);
                    av_interleaved_write_frame(output_format_ctx, &packet);
                    audioIndex++;
                    av_packet_unref(&packet); // was unreachable after continue before
                    continue;
                }
                audioIndex++;
            }
    
            if (packet.stream_index == videoStreamIndex) {
                qDebug() << "============>sec:" << packet.pts * av_q2d(timeBase) << "<==============";
                if (videoIndex % m_interval == 0) {
                    AVRational outTb = output_format_ctx->streams[videoStreamIndex]->time_base;
                    if (videoIndex == 0) {
                        packet.pts = 0;
                        packet.dts = 0;
                    } else {
                        packet.pts = av_rescale_q(packet.pts + currentPts, timeBase, outTb);
                        packet.dts = av_rescale_q(packet.dts + currentPts, timeBase, outTb);
                    }
                    packet.duration = av_rescale_q(packet.duration, timeBase, outTb);
                    av_interleaved_write_frame(output_format_ctx, &packet);
                    videoIndex++;
                    av_packet_unref(&packet);
                    continue;
                }
                videoIndex++;
            }
    
                av_packet_unref(&packet);
    
    
            // Block here while paused.
                {
                    std::unique_lock<std::mutex> lock(m_mutex);
                    cv.wait(lock, [this]{
                        if (m_bPaused) {
                            stateCallBack(PAUSE);
                        }
                        return !m_bPaused; });
                }
        }

        // Shift the next input by the duration of the file just written.
        // AVStream::duration is in that stream's own time base, so rescale
        // it into the shared video time base before accumulating.
        currentPts += av_rescale_q(fmtContext->streams[videoStreamIndex]->duration,
                                   fmtContext->streams[videoStreamIndex]->time_base,
                                   timeBase);
    }
    
    
    
    // Write the container trailer.
    av_write_trailer(output_format_ctx);
    // Close the input and release the output context.
    avformat_close_input(&fmtContext);
    if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&output_format_ctx->pb);
    }
    avformat_free_context(output_format_ctx);
        if (m_bStop) {
            stateCallBack(STOP);
            return;
        }
        stateCallBack(FINISH);
    }
    
    
  • Original article: https://blog.csdn.net/weixin_42252757/article/details/138576653