Complete step-by-step video encoding and decoding using the FFmpeg open-source library under Windows

Final decoding effect:

1.UI design

2. Enter the default value in the control properties window

3. Copy the compiled FFmpeg library to the same directory as the project

4. Reference the FFmpeg library and header files in the project

5. Link the specified FFmpeg library

6. Use the FFmpeg library

Reference the required header files

extern "C"
{
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/bsf.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/imgutils.h"
#include "libavutil/time.h"
#include <libswresample/swresample.h>

}

Create video codec management class

Implement video codec management class

#include "ffmpegmananger.h"
#include <QThread>
ffmpegMananger::ffmpegMananger(QObject *parent ):
    QObject(parent)
{
    // Start with no open contexts and empty URLs; they are populated later
    // via getRtspURL()/getOutURL() and opened inside ffmepgInput().
    m_pInFmtCtx = nullptr;   // input (demuxer) context
    m_pTsFmtCtx = nullptr;   // output (muxer) context
    m_qstrRtspURL.clear();
    m_qstrOutPutFile.clear();
}
ffmpegMananger::~ffmpegMananger()
{
    // m_pInFmtCtx is opened with avformat_open_input() in ffmepgInput(), so
    // it must be released with avformat_close_input(); calling only
    // avformat_free_context() leaks the demuxer state and the open AVIO handle.
    if (m_pInFmtCtx)
    {
        avformat_close_input(&m_pInFmtCtx);
    }
    if (m_pTsFmtCtx)
    {
        // Close the output AVIO stream opened in setOutputCtx() before
        // freeing the muxer context itself.
        if (m_pTsFmtCtx->pb)
        {
            avio_closep(&m_pTsFmtCtx->pb);
        }
        avformat_free_context(m_pTsFmtCtx);
        m_pTsFmtCtx = nullptr;
    }
}

void ffmpegMananger::getRtspURL(QString strRtspURL)
{
    // NOTE: despite the "get" prefix this is a setter — it stores the RTSP
    // source URL consumed later by ffmepgInput().
    m_qstrRtspURL = strRtspURL;
}
void ffmpegMananger::getOutURL(QString strRute)
{
    // Setter for the output (muxing) file path; "get" prefix kept for
    // interface compatibility. Fixes the HTML-mangled "\n" that left the
    // printf format string unterminated.
    this->m_qstrOutPutFile = strRute;
    printf("===========%s\n", m_qstrOutPutFile.toStdString().c_str());
}
void ffmpegMananger::setOutputCtx(AVCodecContext *encCtx, AVFormatContext **pTsFmtCtx, int &nVideoIdx_out)
{
    // Create the output container for m_qstrOutPutFile, open its AVIO stream,
    // add a single video stream copied from the encoder configuration, and
    // return that stream's index through nVideoIdx_out.
    //
    // The container format is guessed by FFmpeg from the file extension.
    avformat_alloc_output_context2(pTsFmtCtx, nullptr, nullptr, m_qstrOutPutFile.toStdString().c_str());
    if (!(*pTsFmtCtx)) {  // bug fix: original tested the out-param pointer itself, not the allocated context
        printf("Could not create output context\n");
        return;
    }
    if (avio_open(&((*pTsFmtCtx)->pb), m_qstrOutPutFile.toStdString().c_str(), AVIO_FLAG_READ_WRITE) < 0)
    {
        avformat_free_context(*pTsFmtCtx);
        *pTsFmtCtx = nullptr;  // don't hand a dangling pointer back to the caller
        printf("avio_open fail.");
        return;
    }
    AVStream *out_stream = avformat_new_stream(*pTsFmtCtx, encCtx->codec);
    if (!out_stream)  // robustness: stream allocation can fail
    {
        printf("avformat_new_stream fail.");
        return;
    }
    nVideoIdx_out = out_stream->index;
    // Copy encoder parameters (codec id, dimensions, extradata/SPS-PPS, ...)
    // onto the muxer stream.
    avcodec_parameters_from_context(out_stream->codecpar, encCtx);
    printf("==========Output Information==========\n");
    av_dump_format(*pTsFmtCtx, 0, m_qstrOutPutFile.toStdString().c_str(), 1);
    printf("======================================\n");
}
int ffmpegMananger::ffmepgInput()
{
    int nRet = 0;
    AVCodecContext *encCtx = nullptr;//encoder
    //const char *pUrl = "D:/videos/264.dat";
    std::string temp = m_qstrRtspURL.toStdString();
    const char *pUrl = temp.c_str();
    printf("===========%s\\
",pUrl);
    AVDictionary *options = nullptr;
    av_dict_set( & amp;options,"rtsp_transport", "tcp", 0);
    av_dict_set( & amp;options,"stimeout","10000000",0);
    //Set "buffer_size" cache capacity
    av_dict_set( & amp;options, "buffer_size", "1024000", 0);
    nRet = avformat_open_input( & amp;m_pInFmtCtx,pUrl,nullptr, & amp;options);
    if( nRet < 0)
    {
        printf("Could not open input file,============keep trying \\
");
        return nRet;
    }
    avformat_find_stream_info(m_pInFmtCtx, nullptr);
    printf("===========Input Information==========\\
");
    av_dump_format(m_pInFmtCtx, 0, pUrl, 0);
    printf("======================================\\
");
    //1. Get the video stream number
    int nVideo_indx = av_find_best_stream(m_pInFmtCtx,AVMEDIA_TYPE_VIDEO,-1,-1,nullptr,0);
    if(nVideo_indx < 0)
    {
        avformat_free_context(m_pInFmtCtx);
        printf("Failed to find decoder\\
");
        return -1;
    }
    //2. Find the decoder
    AVCodec *pInCodec = avcodec_find_decoder(m_pInFmtCtx->streams[nVideo_indx]->codecpar->codec_id);
    if(nullptr == pInCodec)
    {
        printf("avcodec_find_decoder fail.");
        return -1;
    }
    //Get decoder context
    AVCodecContext* pInCodecCtx = avcodec_alloc_context3(pInCodec);
    //Copy decoder parameters
    nRet = avcodec_parameters_to_context(pInCodecCtx, m_pInFmtCtx->streams[nVideo_indx]->codecpar);
    if(nRet < 0)
    {

        avcodec_free_context( & amp;pInCodecCtx);
        printf("avcodec_parameters_to_context fail.");
        return -1;
    }
    //Open the decoder
    if(avcodec_open2(pInCodecCtx, pInCodec, nullptr) < 0)
    {
        avcodec_free_context( & amp;pInCodecCtx);
        printf("Error: Can't open codec!\\
");
        return -1;
    }
    printf("width = %d\\
", pInCodecCtx->width);
    printf("height = %d\\
", pInCodecCtx->height);
    int frame_index = 0;
    int got_picture = 0;
    AVStream *in_stream =nullptr;
    AVStream *out_stream =nullptr;
    AVFrame *pFrame= av_frame_alloc();
    AVPacket *newpkt = av_packet_alloc();
    AVPacket *packet = av_packet_alloc();
    av_init_packet(newpkt);
    av_init_packet(packet);
    // allocAVFrame
    AVFrame*pFrameRGB = av_frame_alloc();
    // Image color space conversion, resolution scaling, and front and rear image filtering processing
    SwsContext *m_SwsContext = sws_getContext(pInCodecCtx->width, pInCodecCtx->height,
            pInCodecCtx->pix_fmt, pInCodecCtx->width, pInCodecCtx->height,
            AV_PIX_FMT_RGB32, SWS_BICUBIC, nullptr, nullptr, nullptr);

    int bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB32, pInCodecCtx->width, pInCodecCtx->height,4);
    uint8_t *m_OutBuffer = (uint8_t *)av_malloc(bytes * sizeof(uint8_t));

    // Use the allocated memory space for pFrameRGB
    avpicture_fill((AVPicture *)pFrameRGB, m_OutBuffer, AV_PIX_FMT_RGB32, pInCodecCtx->width, pInCodecCtx->height);
    if(encCtx == nullptr)
    {
        //Open the encoder
        openEncoder(pInCodecCtx->width, pInCodecCtx->height, & amp;encCtx);
    }
    int videoindex_out = 0;
    //Set the output file context
    setOutputCtx(encCtx, & amp;m_pTsFmtCtx,videoindex_out);
    //Write file header
    if (avformat_write_header(m_pTsFmtCtx, nullptr) < 0)
    {
        avformat_free_context(m_pTsFmtCtx);
        printf("Error occurred when opening output file\\
");
        return -1;
    }
    printf("==============writer trail===================.\\
");
    int count = 0;
    nRet = 0;
    while(av_read_frame(m_pInFmtCtx, packet) >= 0)//Read H264 data from pInFmtCtx to packet;
    {
        if(packet->stream_index != nVideo_indx)//only keep images
        {
            continue;
        }
        if(avcodec_send_packet(pInCodecCtx, packet)<0)//Send the H264 data in the packet to the decoder for decoding, and the decoded YUV data is placed in pInCodecCtx,
        {
           break;
        }
        av_packet_unref(packet);
        got_picture = avcodec_receive_frame(pInCodecCtx, pFrame);//Put the decoded YUV data into pFrame
        if(0 == got_picture)//Decode one frame of data
        {
            //Send signal to display image
            // Perform operations such as scaling and format conversion on decoded video frames
            sws_scale(m_SwsContext, (uint8_t const * const *)pFrame->data,
                     pFrame->linesize, 0, pInCodecCtx->height,
                     pFrameRGB->data, pFrameRGB->linesize);

            // Convert to QImage
            QImage tmmImage((uchar *)m_OutBuffer, pInCodecCtx->width, pInCodecCtx->height, QImage::Format_RGB32);
            QImage image = tmmImage.copy();

            //Send QImage
            emit Sig_GetOneFrame(image);

            setDecoderPts(newpkt->stream_index,count, pFrame);
            count + + ;
            //Send the original data to the encoder for encoding
            nRet = avcodec_send_frame(encCtx,pFrame);
            if(nRet < 0)
            {
                continue;
            }
            //Get numbered data from encoder
            while(nRet >= 0)
            {
                nRet = avcodec_receive_packet(encCtx,newpkt);
                if(nRet < 0)
                {
                    break;
                }
                setEncoderPts(nVideo_indx,frame_index,videoindex_out,newpkt);
                int _count = 1;
                printf("Write %d Packet. size:]\tpts:%lld\\
", _count,newpkt->size, newpkt->pts);

                if (av_interleaved_write_frame(m_pTsFmtCtx, newpkt) < 0)
                {
                    printf("Error muxing packet\\
");
                    goto end;
                }
                _count + + ;
                av_packet_unref(newpkt);
            }
        }
    }
    while(1)//Read H264 data from pInFmtCtx to packet;
    {
        if(packet->stream_index != nVideo_indx)//only keep images
        {
            continue;
        }
        if(avcodec_send_packet(pInCodecCtx, packet)<0)//Send the H264 data in the packet to the decoder for decoding, and the decoded YUV data is placed in pInCodecCtx,
        {
            continue;
        }
        av_packet_unref(packet);
        got_picture = avcodec_receive_frame(pInCodecCtx, pFrame);//Put the decoded YUV data into pFrame
        if(!got_picture)//Decode one frame of data
        {
            AVRational in_time_base1 = in_stream->time_base;
            in_stream = m_pInFmtCtx->streams[newpkt->stream_index];

            //Duration between 2 frames (us)
            int64_t in_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
            pFrame->pts = (double)(count*in_duration) / (double)(av_q2d(in_time_base1)*AV_TIME_BASE);
            count + + ;
            //Send the original data to the encoder for encoding
            nRet = avcodec_send_frame(encCtx,pFrame);
            if(nRet < 0)
            {
                break;
            }
            //Get numbered data from encoder
            while(nRet >= 0)
            {
                nRet = avcodec_receive_packet(encCtx,newpkt);
                if(nRet < 0)
                {
                    continue;
                }
                in_stream = m_pInFmtCtx->streams[newpkt->stream_index];
                out_stream = m_pTsFmtCtx->streams[videoindex_out];
                if (newpkt->stream_index == nVideo_indx)
                {
                    //FIX: No PTS (Example: Raw H.264)
                    //Simple Write PTS
                    if (newpkt->pts == AV_NOPTS_VALUE)
                    {
                        //Write PTS
                        AVRational time_base1 = in_stream->time_base;
                        //Duration between 2 frames (us)
                        int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                        //Parameters
                        newpkt->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                        newpkt->dts = newpkt->pts;
                        newpkt->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                        frame_index + + ;
                    }
                 }
                //Convert PTS/DTS
                newpkt->pts = av_rescale_q_rnd(newpkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                newpkt->dts = av_rescale_q_rnd(newpkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                newpkt->duration = av_rescale_q(newpkt->duration, in_stream->time_base, out_stream->time_base);
                newpkt->pos = -1;
                newpkt->stream_index = videoindex_out;
                int count = 1;
                printf("Write %d Packet. size:]\tpts:%lld\\
", count,newpkt->size, newpkt->pts);

                if (av_interleaved_write_frame(m_pTsFmtCtx, newpkt) < 0)
                {
                    printf("Error muxing packet\\
");
                    goto end;
                }
                count + + ;
                av_packet_unref(newpkt);
            }
        }
    }
    //Write file trailer
    av_write_trailer(m_pTsFmtCtx);
end:
    av_frame_free( & amp;pFrame);
    av_frame_free( & amp;pFrameRGB);
    av_packet_unref(newpkt);
    av_packet_unref(packet);
    std::cout<<"rtsp's h264 to ts end";
    return 0;
}
void ffmpegMananger::setDecoderPts(int idx,int count,AVFrame *pFrame)
{
    AVStream* in_stream = m_pInFmtCtx->streams[idx];
    AVRational in_time_base1 = in_stream->time_base;
    //Duration between 2 frames (us)
    int64_t in_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
    pFrame->pts = (double)(count*in_duration) / (double)(av_q2d(in_time_base1)*AV_TIME_BASE);
}
void ffmpegMananger::setEncoderPts(int nVideo_indx,int frame_index,int videoindex_out,AVPacket *newpkt)
{
    // Prepare an encoded packet for muxing: synthesize timestamps when the
    // encoder produced none, then rescale PTS/DTS/duration from the input
    // stream's time base into the output stream's, and retarget the packet at
    // the output stream index.
    AVStream *in_stream = m_pInFmtCtx->streams[newpkt->stream_index];
    AVStream *out_stream = m_pTsFmtCtx->streams[videoindex_out];
    if (newpkt->stream_index == nVideo_indx)
    {
        // FIX: no PTS (e.g. raw H.264) — write a simple PTS derived from the
        // frame index and the input frame rate.
        if (newpkt->pts == AV_NOPTS_VALUE)
        {
            AVRational time_base1 = in_stream->time_base;
            // Duration between two frames, in microseconds.
            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
            newpkt->pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
            newpkt->dts = newpkt->pts;
            newpkt->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
            // NOTE(review): frame_index is passed by value, so this increment
            // never reaches the caller; take it as `int &` if consecutive
            // synthesized PTS values are required. (Was mangled "+ +".)
            frame_index++;
        }
     }
    // Convert PTS/DTS into the output time base.
    newpkt->pts = av_rescale_q_rnd(newpkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    newpkt->dts = av_rescale_q_rnd(newpkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    newpkt->duration = av_rescale_q(newpkt->duration, in_stream->time_base, out_stream->time_base);
    newpkt->pos = -1; // byte position unknown after re-encoding
    newpkt->stream_index = videoindex_out;
}
void ffmpegMananger::writeTail()
{
    // Finalize the output container by writing the trailer. Guard against
    // being called before the muxer context was created (would crash).
    if (m_pTsFmtCtx != nullptr)
    {
        av_write_trailer(m_pTsFmtCtx);
    }
}
void ffmpegMananger::openEncoder(int width, int height, AVCodecContext** enc_ctx)
{
    // Create and open an H.264 (libx264) encoder context of the given size,
    // returning it through enc_ctx. On any failure *enc_ctx is left null (or
    // allocated-but-unopened if avcodec_open2 fails, matching the original).
    AVCodec *pCodec = avcodec_find_encoder_by_name("libx264");
    if (nullptr == pCodec)
    {
        printf("avcodec_find_encoder_by_name fail.\n");
        return;
    }
    // Get the encoder context.
    *enc_ctx = avcodec_alloc_context3(pCodec);
    if (nullptr == *enc_ctx) // bug fix: original tested enc_ctx (the out-param), never the allocation result
    {
        printf("avcodec_alloc_context3(pCodec) fail.\n");
        return;
    }
    // sps/pps
    (*enc_ctx)->profile = FF_PROFILE_H264_MAIN;
    (*enc_ctx)->level = 30; // H.264 level 3.0 (the field stores level * 10)
    // Resolution
    (*enc_ctx)->width = width;
    (*enc_ctx)->height = height;
    // GOP: I-frame every 25 frames, minimum I-frame interval 20.
    (*enc_ctx)->gop_size = 25;
    (*enc_ctx)->keyint_min = 20; // OPTION
    // No B-frames (keeps latency low and PTS == DTS).
    (*enc_ctx)->max_b_frames = 0;
    (*enc_ctx)->has_b_frames = 0;
    // Reference frames
    (*enc_ctx)->refs = 3; // OPTION
    // Input pixel format expected from the decoder.
    (*enc_ctx)->pix_fmt = AV_PIX_FMT_YUV420P;
    // Bit rate: 3 Mbps.
    (*enc_ctx)->bit_rate = 3000000;
    // Frame timing: 25 fps (time_base = 1/25, framerate = 25/1).
    (*enc_ctx)->time_base.num = 1;
    (*enc_ctx)->time_base.den = 25;
    (*enc_ctx)->framerate.num = 25;
    (*enc_ctx)->framerate.den = 1;
    if (avcodec_open2((*enc_ctx), pCodec, nullptr) < 0)
    {
        printf("avcodec_open2 fail.\n");
    }
    return;
}