
Recherche avancée
Autres articles (42)
-
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, par — Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.
-
Ecrire une actualité
21 juin 2013, par — Présentez les changements dans votre MédiaSPIP ou les actualités de vos projets sur votre MédiaSPIP grâce à la rubrique actualités.
Dans le thème par défaut spipeo de MédiaSPIP, les actualités sont affichées en bas de la page principale sous les éditoriaux.
Vous pouvez personnaliser le formulaire de création d’une actualité.
Formulaire de création d’une actualité Dans le cas d’un document de type actualité, les champs proposés par défaut sont : Date de publication ( personnaliser la date de publication ) (...) -
Publier sur MédiaSpip
13 juin 2013 — Puis-je poster des contenus à partir d’une tablette iPad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir
Sur d’autres sites (6075)
-
Ffmpeg AVAudioFifo memory leak
10 août 2020, par Expressingx — I'm encoding audio to AAC with the encoder. Because the encoder requires fixed-size frames, I'm using AVAudioFifo, following https://ffmpeg.org/doxygen/4.0/transcode_aac_8c-example.html

Everything works, but I can see my memory slowly growing up. If I comment out
PushToFifo()
no memory leaks, and I'm not sure why. I've profiled the app with ANTS Profiler and I can see a lot of unmanaged memory allocated by avutil-x.dll.

// Demux/decode loop: read packets from the input, send them to the decoder,
// and drain every decoded frame into the audio FIFO.
while (ffmpeg.av_read_frame(_inputContext.InputFormatContext, pkt) >= 0)
{
    // decode
    int ret = ffmpeg.avcodec_send_packet(decCtx, pkt);

    // The packet's data has been consumed by the decoder (or the send failed).
    // Release the reference in both cases — the original only unreferenced it
    // on the failure path, leaking every successfully sent packet's buffer.
    ffmpeg.av_packet_unref(pkt);

    if (ret < 0)
    {
        return;
    }

    // Drain all frames produced by this packet.
    while (ret >= 0)
    {
        ret = ffmpeg.avcodec_receive_frame(decCtx, frame);

        if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
        {
            // The decoder needs more input: break out to read the next
            // packet. The original returned here, which abandoned the
            // entire input after the first packet was drained.
            break;
        }

        if (ret == ffmpeg.AVERROR_EOF || ret < 0)
        {
            // End of stream or a real decoding error — stop.
            return;
        }

        // push to fifo
        PushToFifo();
        TryFlushSamples();
    }
}



PushToFifo()


// Convert the decoded frame with swresample and append it to the audio FIFO.
// av_audio_fifo_write() copies the samples into the FIFO's own storage, so the
// scratch buffer allocated here MUST be freed before returning — leaking it on
// every call is the unmanaged-memory growth attributed to avutil-x.dll.
byte* samples = null;

try
{
    // Allocate one buffer (all planes) sized for the converted samples.
    if (ffmpeg.av_samples_alloc(&samples, null, _outputContext->channels, inputFrame->nb_samples, _outputContext->sample_fmt, 0) < 0)
    {
        // Nothing was allocated on failure. Do not fall through and hand a
        // null buffer to swr_convert (the original code did exactly that).
        throw new Exception("av_samples_alloc failed");
    }

    // Resample/convert the input frame into the scratch buffer.
    if (ffmpeg.swr_convert(_resamplerCtx, &samples, inputFrame->nb_samples, inputFrame->extended_data, inputFrame->nb_samples) < 0)
    {
        throw new Exception("swr_convert failed");
    }

    // Grow the FIFO so the new samples fit.
    if (ffmpeg.av_audio_fifo_realloc(AudioFifo, ffmpeg.av_audio_fifo_size(AudioFifo) + inputFrame->nb_samples) < 0)
    {
        throw new Exception("av_audio_fifo_realloc failed");
    }

    // The FIFO copies the data, so 'samples' can be released afterwards.
    if (ffmpeg.av_audio_fifo_write(AudioFifo, (void**)&samples, inputFrame->nb_samples) < 0)
    {
        throw new Exception("av_audio_fifo_write failed");
    }
}
finally
{
    // av_freep frees the buffer allocated by av_samples_alloc (the first
    // pointer owns all planes) and nulls the pointer. Runs on every path,
    // fixing the per-call leak.
    if (samples != null)
    {
        ffmpeg.av_freep(&samples);
    }
}



And in
TryFlushSamples():


// Drain full encoder-sized frames from the FIFO and hand them to the encoder.
while (ffmpeg.av_audio_fifo_size(_audioFifo.AudioFifo) >= _outputContext.AudioEncodeContext->frame_size)
{
    int fifoSize = ffmpeg.av_audio_fifo_size(_audioFifo.AudioFifo);
    // Never request more samples than the FIFO holds or the encoder accepts.
    // (The loop condition guarantees fifoSize >= frame_size, so this always
    // picks frame_size; kept for safety.)
    int frameSize = fifoSize > _outputContext.AudioEncodeContext->frame_size
        ? _outputContext.AudioEncodeContext->frame_size
        : fifoSize;

    var outputContext = _outputContext.AudioEncodeContext;
    var frame = ffmpeg.av_frame_alloc();
    frame->nb_samples = frameSize;
    frame->channel_layout = outputContext->channel_layout;
    frame->format = (int)outputContext->sample_fmt;
    frame->sample_rate = outputContext->sample_rate;

    if (ffmpeg.av_frame_get_buffer(frame, 0) < 0)
    {
        // Buffer allocation failed: free the frame and stop. The original
        // freed the frame here but then kept using it below (use-after-free).
        ffmpeg.av_frame_free(&frame);
        return;
    }

    // read frame
    if (ffmpeg.av_audio_fifo_read(_audioFifo.AudioFifo, (void**)&frame->data, frameSize) < frameSize)
    {
        ffmpeg.av_frame_free(&frame);
        return;
    }

    // Monotonic PTS counted in samples.
    frame->pts = _audioFrameCount;
    _audioFrameCount += frame->nb_samples;

    // send to encoder 

    ffmpeg.av_frame_free(&frame);
}



-
Ffmpeg H.264 encode video is sped up if camera capture with low light
10 août 2020, par Expressingx — I'm encoding everything to H.264. If h264_qsv is available I'm using it, else libx264. Works fine, but I noticed that if the camera is recording in low light, the video saved is sped up like x2 or x3. And I'm not sure where the problem is. Creating the input format context :

/// <summary>
/// Opens the input stream (<c>_streamUrl</c>, optionally forced to
/// <c>_format</c>) and returns the demuxer context.
/// </summary>
private AVFormatContext* CreateFormatContext()
{
    AVDictionary* options = null;

    // Low-latency / reconnection tuning passed to the demuxer.
    ffmpeg.av_dict_set(&options, "packet-buffering", "0", 0);
    ffmpeg.av_dict_set(&options, "sync", "1", 0);
    ffmpeg.av_dict_set(&options, "rtsp_transport", "tcp", 0);
    ffmpeg.av_dict_set(&options, "reconnect", "1", 0);
    ffmpeg.av_dict_set(&options, "analyzeduration", "2000000", 0);
    ffmpeg.av_dict_set(&options, "probesize", (16384 * 16).ToString(), 0);
    ffmpeg.av_dict_set(&options, "max_delay", "0", 0);
    ffmpeg.av_dict_set(&options, "reorder_queue_size", "0", 0);
    ffmpeg.av_dict_set(&options, "skip_frame", "8", 0);
    ffmpeg.av_dict_set(&options, "skip_loop_filter", "48", 0);
    ffmpeg.av_dict_set(&options, "rtbufsize", "1000M", 0);

    AVFormatContext* pInputFmtCtx = ffmpeg.avformat_alloc_context();

    AVInputFormat* inputFormat = null;

    if (!string.IsNullOrEmpty(_format))
    {
        inputFormat = ffmpeg.av_find_input_format(_format);

        if (inputFormat == null)
        {
            //throw
        }
    }

    int ret = ffmpeg.avformat_open_input(&pInputFmtCtx, _streamUrl, inputFormat, &options);

    // avformat_open_input consumes the options it recognizes and leaves the
    // rejected ones in the dictionary. Free it either way — the original
    // leaked the dictionary on every call.
    ffmpeg.av_dict_free(&options);

    if (ret != 0)
    {
        //throw
    }

    return pInputFmtCtx;
}



video decoder


/// <summary>
/// Creates and opens a decoder context for the input video stream,
/// copying the stream's codec parameters into it.
/// </summary>
private void CreateVideoDecoder()
{
    AVStream* videoStream = InputFormatContext->streams[VideoStreamIndex];
    AVCodecParameters* videoCodecParams = videoStream->codecpar;
    AVCodec* videoDecoder = ffmpeg.avcodec_find_decoder(videoCodecParams->codec_id);

    // Guard against an unsupported codec id before allocating the context
    // (consistent with the null checks in CreateH264Encoder).
    if (videoDecoder == null)
    {
        //throw
    }

    VideoDecodeContext = ffmpeg.avcodec_alloc_context3(videoDecoder);

    // Copy resolution, pixel format, extradata, ... from the stream.
    if (ffmpeg.avcodec_parameters_to_context(VideoDecodeContext, videoCodecParams) < 0)
    {
        //throw
    }

    if (ffmpeg.avcodec_open2(VideoDecodeContext, videoDecoder, null) < 0)
    {
        //throw
    }
}



and the h264 encoder


/// <summary>
/// Creates and opens the H.264 encoder (h264_qsv when available, libx264
/// otherwise) for the given input stream, and copies the encoder parameters
/// to <paramref name="outputStream"/>.
/// </summary>
private void CreateH264Encoder(AVStream* inputStream, AVStream* outputStream)
{
    AVRational framerate = ffmpeg.av_guess_frame_rate(_inputContext.InputFormatContext, inputStream, null);

    AVCodec* videoEncoder = ffmpeg.avcodec_find_encoder_by_name("h264_qsv");
    if (videoEncoder == null)
    {
        videoEncoder = ffmpeg.avcodec_find_encoder_by_name("libx264");
        PixelFormat = AVPixelFormat.AV_PIX_FMT_YUV420P;
    }

    if (videoEncoder == null)
    {
        //throw
    }

    VideoEncodeContext = ffmpeg.avcodec_alloc_context3(videoEncoder);

    if (VideoEncodeContext == null)
    {
        //throw
    }

    VideoEncodeContext->width = _inputContext.VideoDecodeContext->width;
    VideoEncodeContext->height = _inputContext.VideoDecodeContext->height;
    VideoEncodeContext->pix_fmt = PixelFormat;
    VideoEncodeContext->bit_rate = 2 * 1000 * 1000;
    VideoEncodeContext->rc_buffer_size = 4 * 1000 * 1000;
    VideoEncodeContext->rc_max_rate = 2 * 1000 * 1000;
    // BUG FIX: rc_min_rate was 3 Mbit/s, i.e. GREATER than rc_max_rate
    // (2 Mbit/s) — an invalid rate-control configuration. min == max == bit_rate
    // gives the intended constant-bitrate behavior.
    VideoEncodeContext->rc_min_rate = 2 * 1000 * 1000;
    VideoEncodeContext->framerate = framerate;
    VideoEncodeContext->max_b_frames = 0;
    // One tick per frame: time_base is the inverse of the guessed frame rate.
    VideoEncodeContext->time_base = ffmpeg.av_inv_q(framerate);
    VideoEncodeContext->flags |= ffmpeg.AV_CODEC_FLAG_GLOBAL_HEADER;

    ffmpeg.av_opt_set(VideoEncodeContext->priv_data, "preset", "slow", 0);
    // BUG FIX: the libx264 private option is "profile"; "vprofile" is not
    // recognized, so the baseline-profile request was silently ignored.
    ffmpeg.av_opt_set(VideoEncodeContext->priv_data, "profile", "baseline", 0);

    if (ffmpeg.avcodec_open2(VideoEncodeContext, videoEncoder, null) < 0)
    {
        //throw
    }

    ffmpeg.avcodec_parameters_from_context(outputStream->codecpar, VideoEncodeContext);
}



I'm using ffmpeg 4.0.1, so I'm decoding/encoding with the new format API which I'll skip to share for now because its nothing more than following the link : https://ffmpeg.org/doxygen/3.3/group__lavc__encdec.html


-
ffmpeg streaming of audio and video using rtmp
30 juillet 2020, par weicheng.yu

I want to stream some videos (a dynamic playlist managed by a python script) to a RTMP server, and i'm currently doing something quite simple : streaming my videos one by one with FFMPEG to the RTMP server, however this causes a connection break every time a video end, and the stream is ready to go when the next video begins.


I would like to stream those videos without any connection breaks continuously, then the stream could be correctly viewed.


I use this command to stream my videos one by one to the server


// Main capture loop: demux packets, queue audio packets as-is, decode video
// packets, and queue the decoded video frames (both queues bounded to 50).
while (CanRun)
 {
 try
 {
 do
 {
 // Read the next undecoded packet from the input.
 error = ffmpeg.av_read_frame(pFormatContext, pPacket);
 // Console.WriteLine(pPacket->dts);
 if (error == ffmpeg.AVERROR_EOF) break;
 if (error < 0) throw new ApplicationException(GetErrorMessage(error));

 // Video packet: fall through to the decoder below.
 if (pPacket->stream_index == pStream->index) { }
 else if (pPacket->stream_index == aStream->index)
 {
 // Audio packet: clone and keep it (no decoding), bounded at 50 entries.
 // NOTE(review): the clone from av_packet_clone is enqueued by value and
 // never unreferenced/freed when dequeued — looks like a per-packet leak;
 // confirm against the consumer of Aqueue.
 AVPacket* aVPacket = ffmpeg.av_packet_clone(pPacket);
 if (Aqueue.Count > 49) Aqueue.Dequeue();
 Aqueue.Enqueue(*aVPacket);

 ++AframeNumber;
 continue;
 }
 else
 {
 ffmpeg.av_packet_unref(pPacket); // release the packet's data reference
 continue;
 }

 // Decode: feed the video packet to the decoder.
 error = ffmpeg.avcodec_send_packet(pCodecContext, pPacket);
 if (error < 0) throw new ApplicationException(GetErrorMessage(error));
 // Pull one decoded frame; EAGAIN means the decoder needs more input,
 // so the do/while loops back to read another packet.
 error = ffmpeg.avcodec_receive_frame(pCodecContext, pDecodedFrame);
 } while (error == ffmpeg.AVERROR(ffmpeg.EAGAIN) && CanRun);
 if (error == ffmpeg.AVERROR_EOF) break;
 if (error < 0) throw new ApplicationException(GetErrorMessage(error));
 if (pPacket->stream_index != pStream->index) continue;

 // Clone the decoded video frame and queue it, bounded at 50 entries.
 // NOTE(review): same pattern as the audio queue — the av_frame_clone
 // result is enqueued by value and apparently never freed; confirm.
 AVFrame* aVFrame = ffmpeg.av_frame_clone(pDecodedFrame);
 if (Vqueue.Count > 49) Vqueue.Dequeue();
 Vqueue.Enqueue(*aVFrame);
 }
 finally
 {
 ffmpeg.av_packet_unref(pPacket); // release the packet's data reference
 ffmpeg.av_frame_unref(pDecodedFrame); // release the decoded frame's reference
 }

 VframeNumber++;
 FFmpeg_Manager.ShowMessage = string.Format(ProgramInfo, VframeNumber, AframeNumber, exhibitionNum, effectiveNum);
 }