
  • Encoding to h264 failed to send some frames using ffmpeg c api

8 July 2020, by Vuwox

    Using the FFmpeg C API, I'm trying to encode generated images into an MP4 file.

    When I push frames one by one, the muxing seems to fail on avcodec_receive_packet(...), which returns AVERROR(EAGAIN) on the first frames; after a while it starts adding frames to the video, but they are the first frames, not the current ones.

    Concretely: when I push frames 1 to 13 I get errors, and from frame 14 to the end (36) packets are added to the video, but the encoded images are not frames 14 to 36; instead, frames 1 to 23 are added.

    I don't understand: is this a problem with the framerate (I want 12 fps), or with key/inter frames?

    Here is the code for the different parts of the class.

    


    NOTE:

    • m_filename = "C:\tmp\test.mp4"
    • m_framerate = 12
    • m_width = 1080
    • m_height = 1080


    ctor:

    


// Allocate the temporary frame that holds our generated image in RGB.
picture_rgb24 = av_frame_alloc();
picture_rgb24->pts = 0;
picture_rgb24->data[0] = NULL;
picture_rgb24->linesize[0] = -1;
picture_rgb24->format = AV_PIX_FMT_RGB24;
picture_rgb24->height = m_height;
picture_rgb24->width = m_width;

if ((_ret = av_image_alloc(picture_rgb24->data, picture_rgb24->linesize, m_width, m_height, (AVPixelFormat)picture_rgb24->format, 32)) < 0) // 32-byte alignment
    throw ...

// Allocate the temporary frame that will be converted from RGB to YUV using the FFmpeg API.
frame_yuv420 = av_frame_alloc();
frame_yuv420->pts = 0;
frame_yuv420->data[0] = NULL;
frame_yuv420->linesize[0] = -1;
frame_yuv420->format = AV_PIX_FMT_YUV420P;
frame_yuv420->width = m_width;
frame_yuv420->height = m_height;

if ((_ret = av_image_alloc(frame_yuv420->data, frame_yuv420->linesize, m_width, m_height, (AVPixelFormat)frame_yuv420->format, 32)) < 0)
    throw ...

init_muxer(); // see below.

m_inited = true;
    
m_pts_increment = av_rescale_q(1, { 1, m_framerate }, ofmt_ctx->streams[0]->time_base);

// Context that converts RGB24 to YUV420P (used instead of a filter graph, as in the GIF case).
swsCtx = sws_getContext(m_width, m_height, AV_PIX_FMT_RGB24, m_width, m_height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
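
    As a side note, av_frame_get_buffer() is the more idiomatic way to allocate the data planes once format, width and height are set; a minimal sketch under the same member names (an assumption, not the original code):

// Hedged sketch: allocate the YUV frame's buffers via av_frame_get_buffer().
frame_yuv420->format = AV_PIX_FMT_YUV420P;
frame_yuv420->width  = m_width;
frame_yuv420->height = m_height;
if (av_frame_get_buffer(frame_yuv420, 32) < 0)   // 32-byte alignment
    throw ...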


    


    init_muxer:

    


    AVOutputFormat* oformat = av_guess_format(nullptr, m_filename.c_str(), nullptr);
if (!oformat) throw ...

_ret = avformat_alloc_output_context2(&ofmt_ctx, oformat, nullptr, m_filename.c_str());
if (_ret) throw ...

AVCodec *codec = avcodec_find_encoder(oformat->video_codec);
if (!codec) throw ...

AVStream *stream = avformat_new_stream(ofmt_ctx, codec);
if (!stream) throw ...

o_codec_ctx = avcodec_alloc_context3(codec);
if (!o_codec_ctx) throw ...

stream->codecpar->codec_id = oformat->video_codec;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width = m_width;
stream->codecpar->height = m_height;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->codecpar->bit_rate = 400000;

avcodec_parameters_to_context(o_codec_ctx, stream->codecpar);
o_codec_ctx->time_base = { 1, m_framerate };

// With gop_size == 0 we request intra-only output, so no B-frames are generated.
o_codec_ctx->max_b_frames = 0;
o_codec_ctx->gop_size = 0;
o_codec_ctx->b_quant_offset = 0;
//o_codec_ctx->framerate = { m_framerate , 1 };

if (stream->codecpar->codec_id == AV_CODEC_ID_H264)
    av_opt_set(o_codec_ctx, "preset", "ultrafast", 0);      // x264 'ultrafast' speed preset
else if (stream->codecpar->codec_id == AV_CODEC_ID_H265)
    av_opt_set(o_codec_ctx, "preset", "ultrafast", 0);      // x265 'ultrafast' speed preset

avcodec_parameters_from_context(stream->codecpar, o_codec_ctx);

if ((_ret = avcodec_open2(o_codec_ctx, codec, NULL)) < 0)
    throw ...

if ((_ret = avio_open(&ofmt_ctx->pb, m_filename.c_str(), AVIO_FLAG_WRITE)) < 0)
    throw ...

if ((_ret = avformat_write_header(ofmt_ctx, NULL)) < 0)
    throw ...

av_dump_format(ofmt_ctx, 0, m_filename.c_str(), 1);
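
    One detail that is easy to miss here: avformat_write_header() may replace stream->time_base (MP4 typically ends up with a much finer tick than {1, 12}), while o_codec_ctx->time_base stays {1, m_framerate}. Packets coming out of the encoder therefore carry codec-time-base timestamps and should be rescaled before writing; a minimal sketch using the names above:

// Hedged sketch: convert encoder timestamps to the muxer's time base.
av_packet_rescale_q(pkt, o_codec_ctx->time_base, ofmt_ctx->streams[0]->time_base);
pkt->stream_index = 0;   // this muxer has a single video stream
av_write_frame(ofmt_ctx, pkt);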


    


    add_frame:

    


// Copy our generated image, pixel by pixel, into the FFmpeg RGB frame.
for (int y = 0; y < m_height; y++)
{
    for (int x = 0; x < m_width; x++)
    {
        picture_rgb24->data[0][idx] = ...;
        picture_rgb24->data[0][idx + 1] = ...;
        picture_rgb24->data[0][idx + 2] = ...;
    }
}

// From RGB to YUV
sws_scale(swsCtx, (const uint8_t * const *)picture_rgb24->data, picture_rgb24->linesize, 0, m_height, frame_yuv420->data, frame_yuv420->linesize);

// mux the YUV frame
muxing_one_frame(frame_yuv420);

// Advance the PTS for the next frame.
picture_rgb24->pts += m_pts_increment;
frame_yuv420->pts += m_pts_increment;
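
    Note that avcodec_send_frame() interprets frame->pts in the encoder's time_base, which is {1, m_framerate} here, so the natural increment is one tick per frame; the m_pts_increment computed against the stream time base belongs to the muxing side. A minimal sketch of the alternative (m_frame_index is a hypothetical counter, not in the original class):

// Hedged sketch: timestamp frames in the codec time base, one tick per frame.
frame_yuv420->pts = m_frame_index++;   // hypothetical counter at {1, m_framerate}
// Packets returned by the encoder are then rescaled to the stream time base
// before av_write_frame (see the init_muxer note above).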


    


    muxing_one_frame:

    


    int ret = avcodec_send_frame(o_codec_ctx, frame);
AVPacket *pkt = av_packet_alloc(); // av_packet_alloc() already initializes the packet

while (ret >= 0) {
    ret = avcodec_receive_packet(o_codec_ctx, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
    av_write_frame(ofmt_ctx, pkt);
    av_packet_unref(pkt); // release the payload before reusing the packet
}
av_packet_free(&pkt);


    


    close_file:

    


    av_write_trailer(ofmt_ctx);
avio_close(ofmt_ctx->pb);
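
    As for the AVERROR(EAGAIN) on the first frames: this is normal x264 behaviour. The encoder buffers a number of input frames (lookahead and rate control) before emitting its first packet, which is why packets only start appearing around frame 14 and why they lag behind the input. The last frames never appear because the encoder is never drained. A minimal flush step, called before av_write_trailer() (flush_encoder is a hypothetical helper using the members above):

// Hedged sketch: drain the encoder so the buffered tail frames are written.
void flush_encoder()
{
    avcodec_send_frame(o_codec_ctx, nullptr);   // signal end-of-stream

    AVPacket* pkt = av_packet_alloc();
    while (avcodec_receive_packet(o_codec_ctx, pkt) >= 0)
    {
        av_packet_rescale_q(pkt, o_codec_ctx->time_base, ofmt_ctx->streams[0]->time_base);
        av_write_frame(ofmt_ctx, pkt);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
}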


    


  • An efficient way to use Windows Named Pipe for IPC

26 July 2020, by Ehsan5

    I am using the JNA module to connect two processes that both run FFmpeg commands: the STDOUT of the FFmpeg command on the server side is written into a named pipe, and the FFmpeg command on the client side receives its STDIN from that named pipe.

    


    This is how I capture STDOUT and send it into the pipe on the server side:

    


    InputStream out = inputProcess.getInputStream();
byte[] buffer = new byte[maxBufferSize];
while (inputProcess.isAlive()) {
    int no = out.available();   // non-blocking poll; this busy loop is what burns CPU
    if (no > maxBufferSize) {
        int n = out.read(buffer, 0, maxBufferSize);
        IntByReference lpNumberOfBytesWritten = new IntByReference(0);
        // write only the n bytes actually read, not the whole buffer
        Kernel32.INSTANCE.WriteFile(pipe, buffer, n, lpNumberOfBytesWritten, null);
    }
}


    


    And this is how I read from the pipe and feed it to STDIN on the client side:

    


    OutputStream in = outputProcess.getOutputStream();
while (pipeOpenValue >= 1 && outputProcess.isAlive() && ServerIdState) {
    // read from the pipe
    resp = Kernel32.INSTANCE.ReadFile(handle, readBuffer, readBuffer.length, lpNumberOfBytesRead, null);
    // write to the STDIN of outputProcess, only the bytes actually read
    if (outputProcess != null) {
        in.write(readBuffer, 0, lpNumberOfBytesRead.getValue());
        in.flush();
    }
    // check the pipe status
    Kernel32.INSTANCE.GetNamedPipeHandleState(handle, null, PipeOpenStatus, null, null, null, 2048);
    pipeOpenValue = PipeOpenStatus.getValue();
    WinDef.ULONGByReference ServerId = new WinDef.ULONGByReference();
    ServerIdState = Kernel32.INSTANCE.GetNamedPipeServerProcessId(handle, ServerId);
}


    


    But I faced two problems:

    1. High CPU usage due to the two polling loops on the server and the client (found by profiling with VisualVM).
    2. Slower throughput than simply connecting the two FFmpeg commands with a regular | in the command prompt. Speed depends on the buffer size: a large buffer blocks the operation, while a small buffer reduces speed further.


    Questions:

    1. Is there any way to avoid sending and receiving in chunks of bytes, and instead just stream STDOUT into the named pipe and capture it on the client side (eliminating the two loops)?
    2. If I can't use a named pipe, is there any other way to connect two FFmpeg processes that run in different Java modules on the same machine? (See the sketch below.)


    Thanks
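
    On the second question, one alternative worth trying: let ffmpeg itself carry the data over a local TCP socket, which removes both hand-written pump loops and lets the socket provide back-pressure. A hedged sketch with a hypothetical port and placeholder file names:

ffmpeg -i input.mp4 -c copy -f mpegts "tcp://127.0.0.1:9000?listen"
ffmpeg -i "tcp://127.0.0.1:9000" -c copy output.mp4

    The first process blocks until the second connects, so no polling loop is needed on either side.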

    


  • ffmpeg streaming of audio and video using rtmp

30 July 2020, by weicheng.yu

    I want to stream some videos (a dynamic playlist managed by a Python script) to an RTMP server. I'm currently doing something quite simple: streaming my videos one by one with FFmpeg to the RTMP server. However, this causes a connection break every time a video ends, and the stream is only ready again when the next video begins.

    


    I would like to stream those videos continuously, without any connection breaks, so that the stream can be viewed correctly.
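
    A common way to keep a single RTMP connection open is to run one ffmpeg process over a concat-demuxer playlist instead of one process per file; a hedged sketch with hypothetical file names:

# playlist.txt (concat demuxer syntax)
file 'video1.mp4'
file 'video2.mp4'

ffmpeg -re -f concat -safe 0 -i playlist.txt -c copy -f flv rtmp://server/app/streamkey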

    


    Below is the read and decode loop from my current implementation:

    


     while (CanRun)
        {
            try
            {
                do
                {
                    // read one packet of undecoded data
                    error = ffmpeg.av_read_frame(pFormatContext, pPacket);
                    // Console.WriteLine(pPacket->dts);
                    if (error == ffmpeg.AVERROR_EOF) break;
                    if (error < 0) throw new ApplicationException(GetErrorMessage(error));

                    if (pPacket->stream_index == pStream->index) { } // video packet: fall through to the decode below
                    else if (pPacket->stream_index == aStream->index)
                    {
                        AVPacket* aVPacket = ffmpeg.av_packet_clone(pPacket);
                        if (Aqueue.Count > 49) Aqueue.Dequeue();
                        Aqueue.Enqueue(*aVPacket);

                        ++AframeNumber;
                        continue;
                    }
                    else
                    {
                        ffmpeg.av_packet_unref(pPacket); // release the packet reference
                        continue;
                    }

                    // send the packet to the decoder
                    error = ffmpeg.avcodec_send_packet(pCodecContext, pPacket);
                    if (error < 0) throw new ApplicationException(GetErrorMessage(error));
                    // receive a decoded frame from the decoder
                    error = ffmpeg.avcodec_receive_frame(pCodecContext, pDecodedFrame);
                } while (error == ffmpeg.AVERROR(ffmpeg.EAGAIN) && CanRun);
                if (error == ffmpeg.AVERROR_EOF) break;
                if (error < 0) throw new ApplicationException(GetErrorMessage(error));
                if (pPacket->stream_index != pStream->index) continue;

                AVFrame* aVFrame = ffmpeg.av_frame_clone(pDecodedFrame);
                if (Vqueue.Count > 49) Vqueue.Dequeue();
                Vqueue.Enqueue(*aVFrame);
            }
            finally
            {
            ffmpeg.av_packet_unref(pPacket); // release the packet reference
            ffmpeg.av_frame_unref(pDecodedFrame); // release the decoded-frame reference
            }

            VframeNumber++;
            FFmpeg_Manager.ShowMessage = string.Format(ProgramInfo, VframeNumber, AframeNumber, exhibitionNum, effectiveNum);
        }