Using Qt and FFmpeg to capture the desktop, publish it as an RTSP stream, and write SEI information in real time

FFmpegMedia.h

#pragma once

#include <QObject>

class QMutex;
template<typename> class QFutureWatcher;

// Captures the desktop (or a single window) via FFmpeg's gdigrab device,
// re-encodes it and publishes the stream to a local RTSP server, injecting a
// user-supplied SEI payload into every encoded video packet.
class FFmpegMedia: public QObject
{
    Q_OBJECT

public:
    FFmpegMedia(QObject *parent = nullptr);
    ~FFmpegMedia();

    // Encoder name passed to avcodec_find_encoder_by_name (default "libx264").
    inline void setCodecName(const QString &codec) { mdesCodec = codec; }
    // Title of the window to capture; "desktop" (default) grabs the whole screen.
    inline void setTargetWindowTitle(const QString &title) { mwindowTitle = title; }

    inline void setFrameRate(int rate) { mframeRate = rate; }
    inline int frameRate() const { return mframeRate; }

    // Id appended to the RTSP publish URL: rtsp://127.0.0.1:8554/<id>.
    inline void setGrabId(int id) { mgrabId = id; }
    inline int grabId() const { return mgrabId; }

    // Thread-safe: may be called while streaming to change the SEI payload.
    void setSeiData(const QByteArray &data);
    QByteArray seiData() const;

    void start();
    void stop();

protected:
    // Blocking grab/encode/publish loop; runs on a QtConcurrent worker thread.
    void runGrab();

private:
    bool isStoped() const;

private:
    QString mdesCodec, mwindowTitle;
    int mframeRate, mgrabId;
    QByteArray mcurrentSeiData;        // guarded by mmutex
    QMutex *mmutex;
    bool mstoped;                      // guarded by mmutex
    QFutureWatcher<void> *mwatcher;
};

FFmpegMedia.cpp

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/time.h"
#include "libavutil/imgutils.h"
}

#include <QMutex>
#include <QMutexLocker>
#include <QtConcurrent/QtConcurrent>

#include "FFmpegMedia.h"


static void avError(const QString & what, int errNum)
{
    char text[1024];
    av_strerror(errNum, text, sizeof(text));
    printf("%s: %s", what.toUtf8().data(), text);
}


enum {
    SeiUuidSize = 16 // fixed UUID length required by the H.264 unregistered-user-data SEI payload
};

// Total size in bytes of a complete unregistered-user-data SEI NAL unit
// carrying `content` payload bytes: NAL header byte + payload-type byte +
// payload-size bytes + UUID + content + rbsp trailing/alignment bytes.
static uint seiNaluSize(uint content)
{
    const uint payloadSize = content + SeiUuidSize; // UUID precedes the user data

    // The payload size is encoded as a run of 0xFF bytes followed by one
    // final byte < 0xFF, i.e. floor(payloadSize / 0xFF) + 1 bytes in total.
    const uint sizeByteCount = payloadSize / 0xFF + 1;

    uint seiSize = 1 + 1 + sizeByteCount + payloadSize;

    // Trailing alignment: pad the NAL to an even length (1 byte if currently
    // odd, 2 bytes if even), matching the tail written by addSeiData().
    seiSize += (seiSize % 2 == 1) ? 1 : 2;
    return seiSize;
}

// Prepend an Annex-B SEI NAL unit (payload type 5, user data unregistered)
// to an already-encoded video packet so receivers can extract `data`.
void addSeiData(AVPacket *packet, const QByteArray &data)
{
    // NOTE(review): this compares the stream index against the media-type
    // enum (AVMEDIA_TYPE_VIDEO == 0). It only works because the video stream
    // is stream 0 in this pipeline — confirm if streams are ever reordered.
    if (packet->stream_index != AVMEDIA_TYPE_VIDEO)
        return;

    // Keep a copy of the encoded frame: the SEI NAL is written at the front
    // of the buffer and the original bitstream is re-appended after it.
    QByteArray oldData(reinterpret_cast<const char *>(packet->data), packet->size);

    uint naluSize = seiNaluSize(data.size());
    if (av_grow_packet(packet, naluSize + 4) < 0) // +4 for the Annex-B start code
        return; // allocation failed: leave the packet untouched

    uchar *pd = packet->data;

    // Annex-B start code
    uchar startCode[] = { 0x00, 0x00, 0x00, 0x01 };
    memcpy(pd, startCode, sizeof(startCode));
    pd += sizeof(startCode);

    uchar *sei = pd; // start of the SEI NAL, used for tail-padding arithmetic

    *pd++ = 6; // NAL unit type 6: SEI
    *pd++ = 5; // SEI payload type 5: user data unregistered

    // Payload size encoded as a run of 0xFF bytes plus a final byte < 0xFF.
    int seiSize = data.size() + SeiUuidSize;
    while (true)
    {
        *pd++ = (seiSize >= 0xFF ? 0xFF : (uchar)seiSize);
        if (seiSize < 0xFF)
            break;
        seiSize -= 0xFF;
    }

    // 16-byte UUID identifying this producer's unregistered SEI payload.
    uchar uuid[] = { 0xdc, 0x45, 0xe9, 0xbd, 0xe6,
    0xd9, 0x48, 0xb7, 0x96, 0x2c, 0xd8, 0x20, 0xd9, 0x23, 0xee, 0xef };
    memcpy(pd, uuid, SeiUuidSize);
    pd += SeiUuidSize;

    // User SEI data
    memcpy(pd, data.data(), data.size());
    pd += data.size();

    // rbsp trailing bits: pad so the NAL occupies exactly naluSize bytes
    // (mirrors the even-length padding computed in seiNaluSize()).
    if (sei + naluSize - pd == 1) {
        *pd++ = 0x80;
    }
    else if (sei + naluSize - pd == 2) {
        *pd++ = 0x00;
        *pd++ = 0x80;
    }

    // Re-append the original encoded frame after the SEI NAL.
    memcpy(pd, oldData.data(), oldData.size());
}


classFFmpegIniter
{
public:
    FFmpegIniter()
    {
        avdevice_register_all();
        avformat_network_init();
    }
};


// Initializer list ordered to match the member declaration order in the
// header (members are always initialized in declaration order; matching it
// avoids -Wreorder and keeps the code honest about what runs first).
FFmpegMedia::FFmpegMedia(QObject *parent)
    : QObject(parent),
    mdesCodec("libx264"),
    mwindowTitle("desktop"),            // "desktop" = capture the whole screen
    mframeRate(30),
    mgrabId(0),
    mmutex(new QMutex),
    mstoped(false),
    mwatcher(new QFutureWatcher<void>(this))
{
}

FFmpegMedia::~FFmpegMedia()
{
    // Order matters: ask the grab loop to exit, wait for the worker thread to
    // actually finish, and only then destroy the mutex that thread locks.
    stop();
    mwatcher->waitForFinished();
    delete mmutex;
}

void FFmpegMedia::setSeiData(const QByteArray & amp;data)
{
    QMutexLocker locker(mmutex);
    mcurrentSeiData = data;
}

// Thread-safe snapshot of the current SEI payload.
QByteArray FFmpegMedia::seiData() const
{
    // mmutex is not declared mutable, so constness is cast away to lock it.
    QMutexLocker locker(const_cast<FFmpegMedia *>(this)->mmutex);
    return mcurrentSeiData;
}

// Launch (or restart) the capture loop on a QtConcurrent worker thread.
void FFmpegMedia::start()
{
    if (mwatcher->isRunning())
    {
        // Shut the current loop down cleanly before starting a new one.
        stop();
        mwatcher->waitForFinished();
    }
    mstoped = false; // safe without the lock: the worker thread is not running here
    // Member-function-pointer-first form works with both Qt 5 and Qt 6
    // (the Qt 5-only run(object, &Class::method) overload was removed in Qt 6).
    auto future = QtConcurrent::run(&FFmpegMedia::runGrab, this);
    mwatcher->setFuture(future);
}

// Request the capture loop to exit; the flag is read under the same mutex.
void FFmpegMedia::stop()
{
    QMutexLocker guard(mmutex);
    mstoped = true;
}

// Thread-safe read of the stop flag, polled by the capture loop.
bool FFmpegMedia::isStoped() const
{
    // mmutex is not declared mutable, so constness is cast away to lock it.
    QMutexLocker guard(const_cast<FFmpegMedia *>(this)->mmutex);
    return mstoped;
}

void FFmpegMedia::runGrab()
{
    static FFmpegIniter initer;

    //Create input target
    const AVInputFormat *gdiFormat = av_find_input_format("gdigrab");
    AVDictionary *srcOpt = nullptr;
    av_dict_set( & amp;srcOpt, "framerate", QByteArray::number(mframeRate), AV_DICT_MATCH_CASE);
    av_dict_set( & amp;srcOpt, "draw_mouse", "0", AV_DICT_MATCH_CASE);

    QString inputUrl = mwindowTitle;
    if (mwindowTitle != "desktop")
        inputUrl = "title=" + mwindowTitle;

    AVFormatContext *srcContext = nullptr;
    int code = avformat_open_input( & amp;srcContext, inputUrl.toUtf8(), gdiFormat, & amp;srcOpt);
    if (code != 0)
        return avError("avformat_open_input", code);

    code = avformat_find_stream_info(srcContext, nullptr);
    if (code < 0)
        return avError("avformat_find_stream_info", code);

    int srcVideoStreamIndex = -1;
    for (uint i = 0; i < srcContext->nb_streams; + + i)
    {
        if (srcContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            srcVideoStreamIndex = i;
            break;
        }
    }
    if (srcVideoStreamIndex == -1)
    {
        qInfo("error find srcVideoStreamIndex!");
        return;
    }

    AVStream *srcStream = srcContext->streams[srcVideoStreamIndex];
    const AVCodec *srcCodec = avcodec_find_decoder(srcStream->codecpar->codec_id);
    AVCodecContext *srcCodecContext = avcodec_alloc_context3(srcCodec);
    code = avcodec_parameters_to_context(srcCodecContext, srcStream->codecpar);
    if (code < 0)
        return avError("copy context", code);

    code = avcodec_open2(srcCodecContext, srcCodec, nullptr);
    if (code < 0)
        return avError("avcodec_open2", code);

    av_dump_format(srcContext, 0, srcContext->url, 0);

    //Create output target
    AVFormatContext *desContext = nullptr;
    code = avformat_alloc_output_context2( & amp;desContext, nullptr, "rtsp", "rtsp://127.0.0.1:8554/" + QByteArray::number(mgrabId));
    if (code < 0)
        return avError("avformat_alloc_output_context2", code);

    // Check whether all streams have data, if there is no data, wait for max_interleave_delta microseconds
    desContext->max_interleave_delta = 1000000;
    av_opt_set(desContext->priv_data, "rtsp_transport", "tcp", 0);
    av_dump_format(desContext, 0, desContext->url, 1);

    const AVCodec *desCodec = avcodec_find_encoder_by_name(mdesCodec.toUtf8());
    if (!desCodec)
    {
        qInfo("error: avcodec_find_encoder_by_name\
");
        return;
    }

    AVCodecContext *desCodecContext = avcodec_alloc_context3(desCodec);
    if (!desCodecContext)
    {
        qInfo("error: avcodec_alloc_context3\
");
        return;
    }

    desCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
    desCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
    desCodecContext->width = srcCodecContext->width;
    desCodecContext->height = srcCodecContext->height;
    desCodecContext->framerate = { mframeRate, 1 };
    desCodecContext->time_base = { 1, mframeRate };

    desCodecContext->gop_size = 10; // GOP (Group of Pictures) refers to a continuous group of pictures. gop_size should be greater than max_b_frames + 1
    desCodecContext->max_b_frames = 2; // B frame (Bi-directional predicted frame) is a bi-directional predicted frame.

    int size = srcCodecContext->width * srcCodecContext->height;
    int bitSize = size * mframeRate / 2;
    desCodecContext->bit_rate = bitSize; // bit ratio(bps) = frame bits / s
    desCodecContext->rc_min_rate = size;
    desCodecContext->rc_max_rate = bitSize;
    desCodecContext->rc_buffer_size = bitSize;
    desCodecContext->qcompress = 1.0;

    av_opt_set_int(desCodecContext->priv_data, "udu_sei", 1, AV_DICT_MATCH_CASE);

    if (mdesCodec == "libx264") // Only applicable to software decoded H264
        av_opt_set(desCodecContext->priv_data, "tune", "zerolatency", AV_DICT_MATCH_CASE); // Good for fast encoding and low-latency streaming, but green screen will appear

    //Open the encoder
    code = avcodec_open2(desCodecContext, desCodec, nullptr);
    if (code < 0)
        return avError("open desCodecContext", code);

    //Output video stream
    AVStream *desStream = avformat_new_stream(desContext, desCodec);
    if (!desStream)
    {
        qInfo("error: avformat_new_stream.");
        return;
    }
    code = avcodec_parameters_from_context(desStream->codecpar, desCodecContext);
    if (code < 0)
        return avError("copy des codecpar", code);

    av_dump_format(desContext, 0, desContext->url, 1);

    if (!(desContext->oformat->flags & amp; AVFMT_NOFILE))
    {
        if (avio_open( & amp;desContext->pb, desContext->url, AVIO_FLAG_WRITE) < 0) {
            qInfo("error: avio_open('%s')", desContext->url);
            return;
        }
    }

    code = avformat_write_header(desContext, nullptr);
    if (code < 0)
        return avError("avformat_write_header", code);

    SwsContext *swscontext = sws_getContext(srcCodecContext->width, srcCodecContext->height, srcCodecContext->pix_fmt,
        desCodecContext->width, desCodecContext->height, desCodecContext->pix_fmt,
        SWS_BICUBIC, nullptr, nullptr, nullptr);

    AVPacket *srcPkt = av_packet_alloc();
    AVPacket *desPkt = av_packet_alloc();
    AVFrame *srcFrame = av_frame_alloc();

    AVFrame *desFrame = av_frame_alloc();
    desFrame->width = desCodecContext->width;
    desFrame->height = desCodecContext->height;
    desFrame->format = desCodecContext->pix_fmt;

    int stride_y = desFrame->width;
    int stride_uv = desFrame->width / 2;

    desFrame->linesize[0] = stride_y;
    desFrame->linesize[1] = stride_uv;
    desFrame->linesize[2] = stride_uv;
    int buffer_size = av_image_get_buffer_size(desCodecContext->pix_fmt, desFrame->width, desFrame->height, 1);
    uint8_t *buffer = (uint8_t *)av_malloc(buffer_size);
    av_image_fill_arrays(desFrame->data, desFrame->linesize, buffer,
        desCodecContext->pix_fmt, desFrame->width, desFrame->height, 1);

    while (!isStoped())
    {
        // decode
        code = av_read_frame(srcContext, srcPkt);
        if (code < 0)
        {
            avError("av_read_frame", code);
            break;
        }

        if (srcPkt->stream_index != srcVideoStreamIndex)
            continue;

        code = avcodec_send_packet(srcCodecContext, srcPkt);
        if (code < 0)
        {
            avError("avcodec_send_packet", code);
            continue;
        }

        code = avcodec_receive_frame(srcCodecContext, srcFrame);
        if (code < 0)
        {
            avError("avcodec_receive_frame", code);
            continue;
        }

        // Frame format conversion
        sws_scale(swscontext, (const uchar *const *)srcFrame->data, srcFrame->linesize, 0, srcFrame->height,
            desFrame->data, desFrame->linesize);

        //encoding
        code = avcodec_send_frame(desCodecContext, desFrame);
        if (code < 0)
        {
            avError("avcodec_send_frame", code);
            continue;
        }

        code = avcodec_receive_packet(desCodecContext, desPkt);
        if (code < 0)
        {
            avError("avcodec_receive_packet", code);
            continue;
        }

        desPkt->pts = av_rescale_q_rnd(srcPkt->pts, srcStream->time_base, desStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_NEAR_INF));
        desPkt->dts = av_rescale_q_rnd(srcPkt->dts, srcStream->time_base, desStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_NEAR_INF));
        desPkt->duration = av_rescale_q(srcPkt->duration, srcStream->time_base, desStream->time_base);
        desPkt->pos = -1;
        desPkt->stream_index = desStream->index;

        QByteArray data = seiData();
        if (!data.isEmpty())
            addSeiData(desPkt, data);
        
        code = av_interleaved_write_frame(desContext, desPkt);
        if (code < 0)
        {
            avError("av_write_frame", code);
            continue;
        }

        av_packet_unref(srcPkt);
        av_packet_unref(desPkt);
        av_frame_unref(srcFrame);
    }
    av_free(buffer);
    av_packet_free( & amp;srcPkt);
    av_packet_free( & amp;desPkt);
    av_frame_free( & amp;srcFrame);
    av_frame_free( & amp;desFrame);
    av_write_trailer(desContext);
    avcodec_close(desCodecContext);
    avcodec_close(srcCodecContext);
    avformat_close_input( & amp;srcContext);
    avformat_close_input( & amp;desContext);
}

Usage:

auto media = new FFmpegMedia(this); // parented to `this`; Qt deletes it with the owner
media->setTargetWindowTitle(windowTitle()); // Optional: omit to capture the full desktop
media->setFrameRate(144); // Capture/encode frame rate (fps)
media->setSeiData(QByteArray("hello! this is sei info test!")); // SEI payload; may be updated at any time while streaming
media->start(); // Begins grabbing/encoding on a background thread

Notice:

Before starting to publish, you must first run an RTSP server with a tool such as mediamtx; otherwise the push will fail to connect.

Through experiments, we found that the av_packet_new_side_data() and av_frame_new_side_data() APIs cannot properly attach custom SEI content (it cannot be parsed on the receiving side). Therefore the SEI NAL unit has to be inserted directly into the H.264/H.265 encoded packet.

For how to display the RTSP stream and parse the SEI information, see my next blog post:

http://t.csdnimg.cn/lPGwj