Advanced search

Media (0)

Keyword: - Tags -/diogene

No media matching your criteria is available on this site.

Other articles (46)

  • Accepted formats

    28 January 2010

    The following commands give information about the formats and codecs supported by the local ffmpeg installation:
    ffmpeg -codecs
    ffmpeg -formats
    Accepted input video formats
    This list is not exhaustive; it highlights the main formats in use: h264: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10; m4v: raw MPEG-4 video format; flv: Flash Video (FLV) / Sorenson Spark / Sorenson H.263; Theora; wmv:
    Possible output video formats
    To begin with, we (...)

  • Keeping control of your media in your hands

    13 avril 2011, par

    The vocabulary used on this site, and around MediaSPIP in general, aims to avoid references to Web 2.0 and the companies that profit from media sharing.
    While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
    MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
    MediaSPIP aims to be accessible to as many people as possible and development is based on expanding the (...)

  • Adding notes and captions to images

    7 February 2011

    To add notes and captions to images, the first step is to install the "Légendes" plugin.
    Once the plugin is activated, you can configure it in the configuration area to change the rights for creating, modifying and deleting notes. By default, only site administrators can add notes to images.
    Changes when adding a media item
    When adding a media item of type "image", a new button appears above the preview (...)

On other sites (3866)

  • Green tint color shift converting RGBA to YUV420p ffmpeg libavcodec

    2 August 2017, by Michael B

    I was wondering if someone could help. I'm currently converting RGBA bitmaps to YUV420p before encoding the frames with the h264 codec, and I'm successfully dumping the encoded packets to file.

    I'm also able to play back the h264 video file in VLC. However, there seems to be a color shift, most apparent where grey comes out as a light tint of green. I used an animation video as my example, which is supposed to show a blue sky in the background; after converting the image, I'm getting an orange sky instead. Do you have any ideas how I can fix this, please?

    Do you happen to know if it's possible to convert RGBA to YUV before calling sws_scale and before encoding the packet?
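
    For reference, a minimal conversion sketch using libswscale (the helper name and buffer layout here are illustrative, not taken from the original code). One classic cause of exactly this symptom, a blue sky turning orange, is that the source bitmap is actually BGRA rather than RGBA, in which case changing the source pixel format may be the whole fix:

    #include "libavutil/frame.h"
    #include "libswscale/swscale.h"

    // Hypothetical helper: convert one tightly packed RGBA bitmap into a
    // pre-allocated AV_PIX_FMT_YUV420P frame. Returns 0 on success.
    static int rgba_to_yuv420p(const uint8_t *rgba_buffer, int width, int height,
                               AVFrame *yuv_frame)
    {
        // If red and blue look swapped in the output, try AV_PIX_FMT_BGRA here.
        SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_RGBA,
                                         width, height, AV_PIX_FMT_YUV420P,
                                         SWS_BILINEAR, NULL, NULL, NULL);
        if (!sws)
            return -1;

        const uint8_t *src_data[4] = { rgba_buffer, NULL, NULL, NULL };
        const int src_linesize[4] = { 4 * width, 0, 0, 0 }; // 4 bytes per pixel

        sws_scale(sws, src_data, src_linesize, 0, height,
                  yuv_frame->data, yuv_frame->linesize);
        sws_freeContext(sws);
        return 0;
    }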

  • ffmpeg encoding, Opus sound in the webm container does not work

    10 March 2019, by Mockarutan

    I'm trying to encode audio and video to a webm file with VP8 and Opus encoding. It almost works. (I use FFmpeg 3.3.2.)

    I can make a video-only webm file, play it in VLC and FFPlay, and upload it to YouTube (and it all works). If I add Opus sound to the file, it still works in VLC but not in FFPlay or on YouTube; on YouTube the sound becomes just "ticks".

    I have the same problem if I encode only Opus audio to the webm file; it only works in VLC. But if I encode only Opus audio to an ogg container, it works everywhere, and I can even use FFmpeg to combine the ogg file with a video-only webm file and produce a fully working webm file with audio and video.

    So it seems that only when I use my code to encode Opus into a webm container, it just won't work in most players or on YouTube. I need it to work on YouTube.

    Here is the code for the Opus-to-webm-only encoding (you can toggle ogg/webm with the define): https://pastebin.com/jyQ4s3tB

    #include <algorithm>
    #include <iterator>

    extern "C"
    {

    //#define OGG

    #include "libavcodec/avcodec.h"
    #include "libavdevice/avdevice.h"
    #include "libavfilter/avfilter.h"
    #include "libavformat/avformat.h"
    #include "libavutil/avutil.h"
    #include "libavutil/imgutils.h"
    #include "libswscale/swscale.h"
    #include "libswresample/swresample.h"

       enum InfoCodes
       {
           ENCODED_VIDEO,
           ENCODED_AUDIO,
           ENCODED_AUDIO_AND_VIDEO,
           NOT_ENOUGH_AUDIO_DATA,
       };

       enum ErrorCodes
       {
           RES_NOT_MUL_OF_TWO = -1,
           ERROR_FINDING_VID_CODEC = -2,
           ERROR_CONTEXT_CREATION = -3,
           ERROR_CONTEXT_ALLOCATING = -4,
           ERROR_OPENING_VID_CODEC = -5,
           ERROR_OPENING_FILE = -6,
           ERROR_ALLOCATING_FRAME = -7,
           ERROR_ALLOCATING_PIC_BUF = -8,
           ERROR_ENCODING_FRAME_SEND = -9,
           ERROR_ENCODING_FRAME_RECEIVE = -10,
           ERROR_FINDING_AUD_CODEC = -11,
           ERROR_OPENING_AUD_CODEC = -12,
           ERROR_INIT_RESMPL_CONTEXT = -13,
           ERROR_ENCODING_SAMPLES_SEND = -14,
           ERROR_ENCODING_SAMPLES_RECEIVE = -15,
           ERROR_WRITING_HEADER = -16,
           ERROR_INIT_AUDIO_RESPAMLER = -17,
       };

       AVCodecID aud_codec_comp_id = AV_CODEC_ID_OPUS;
       AVSampleFormat sample_fmt_comp = AV_SAMPLE_FMT_FLT;

       AVCodecID aud_codec_id;
       AVSampleFormat sample_fmt;

    #ifndef OGG
       char* compressed_cont = "webm";
    #endif
    #ifdef OGG
       char* compressed_cont = "ogg";
    #endif

       AVCodec *aud_codec = NULL;
       AVCodecContext *aud_codec_context = NULL;
       AVFormatContext *outctx;
       AVStream *audio_st;
       AVFrame *aud_frame;
       SwrContext *audio_swr_ctx;

       int vid_frame_counter, aud_frame_counter;
       int vid_width, vid_height;

       char* concat(const char *s1, const char *s2)
       {
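           // Note: returns a malloc'd buffer that the caller must free()
           // (initialize_audio_only_encoding below frees both results).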
           char *result = (char*)malloc(strlen(s1) + strlen(s2) + 1);

           strcpy(result, s1);
           strcat(result, s2);

           return result;
       }

       int setup_audio_codec()
       {
           aud_codec_id = aud_codec_comp_id;
           sample_fmt = sample_fmt_comp;

           // Fixup audio codec
           if (aud_codec == NULL)
           {
               aud_codec = avcodec_find_encoder(aud_codec_id);
               avcodec_register(aud_codec);
           }

           if (!aud_codec)
               return ERROR_FINDING_AUD_CODEC;

           return 0;
       }

       int initialize_audio_stream(AVFormatContext *local_outctx, int sample_rate, int per_frame_audio_samples, int audio_bitrate)
       {
           aud_codec_context = avcodec_alloc_context3(aud_codec);
           if (!aud_codec_context)
               return ERROR_CONTEXT_CREATION;

           aud_codec_context->bit_rate = audio_bitrate;
           aud_codec_context->sample_rate = sample_rate;
           aud_codec_context->sample_fmt = sample_fmt;
           aud_codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
           aud_codec_context->channels = av_get_channel_layout_nb_channels(aud_codec_context->channel_layout);
           //aud_codec_context->profile = FF_PROFILE_AAC_MAIN;

           aud_codec_context->codec = aud_codec;
           aud_codec_context->codec_id = aud_codec_id;

           AVRational time_base;
           time_base.num = per_frame_audio_samples;
           time_base.den = aud_codec_context->sample_rate;
           aud_codec_context->time_base = time_base;
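           // Note (assumption, not verified): libopus always runs at 48 kHz
           // internally, and avformat_write_header() may replace the stream's
           // time_base with the muxer's own (Matroska/WebM uses 1/1000); if
           // timestamps look wrong, comparing this codec time_base with
           // audio_st->time_base after the header is written is a reasonable
           // first check.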

           int ret = avcodec_open2(aud_codec_context, aud_codec, NULL);

           if (ret < 0)
               return ERROR_OPENING_AUD_CODEC;

           local_outctx->audio_codec = aud_codec;
           local_outctx->audio_codec_id = aud_codec_id;

           audio_st = avformat_new_stream(local_outctx, aud_codec);

           audio_st->codecpar->bit_rate = aud_codec_context->bit_rate;
           audio_st->codecpar->sample_rate = aud_codec_context->sample_rate;
           audio_st->codecpar->channels = aud_codec_context->channels;
           audio_st->codecpar->channel_layout = aud_codec_context->channel_layout;
           audio_st->codecpar->codec_id = aud_codec_context->codec_id;
           audio_st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
           audio_st->codecpar->format = aud_codec_context->sample_fmt;
           audio_st->codecpar->frame_size = aud_codec_context->frame_size;
           audio_st->codecpar->block_align = aud_codec_context->block_align;
           audio_st->codecpar->initial_padding = aud_codec_context->initial_padding;
           audio_st->codecpar->extradata = aud_codec_context->extradata;
           audio_st->codecpar->extradata_size = aud_codec_context->extradata_size;

           aud_frame = av_frame_alloc();
           aud_frame->nb_samples = aud_codec_context->frame_size;
           aud_frame->format = aud_codec_context->sample_fmt;
           aud_frame->channel_layout = aud_codec_context->channel_layout;
           aud_frame->sample_rate = aud_codec_context->sample_rate;

           int buffer_size;
           if (aud_codec_context->frame_size == 0)
           {
               buffer_size = per_frame_audio_samples * 2 * 4;
               aud_frame->nb_samples = per_frame_audio_samples;
           }
           else
           {
               buffer_size = av_samples_get_buffer_size(NULL, aud_codec_context->channels, aud_codec_context->frame_size,
                   aud_codec_context->sample_fmt, 0);
           }

           if (av_sample_fmt_is_planar(sample_fmt))
               ret = av_frame_get_buffer(aud_frame, buffer_size / 2);
           else
               ret = av_frame_get_buffer(aud_frame, buffer_size);

           if (!aud_frame || ret < 0)
               return ERROR_ALLOCATING_FRAME;

           aud_frame_counter = 0;

           return 0;
       }

       int initialize_audio_only_encoding(int sample_rate, int per_frame_audio_samples, int audio_bitrate, const char *filename)
       {
           int ret;

           avcodec_register_all();
           av_register_all();

           outctx = avformat_alloc_context();

           char* with_dot = concat(filename, ".");
           char* full_filename = concat(with_dot, compressed_cont);

           ret = avformat_alloc_output_context2(&outctx, NULL, compressed_cont, full_filename);

           free(with_dot);

           if (ret < 0)
           {
               free(full_filename);
               return ERROR_CONTEXT_CREATION;
           }

           ret = setup_audio_codec();
           if (ret < 0)
               return ret;

           // Setup Audio
           ret = initialize_audio_stream(outctx, sample_rate, per_frame_audio_samples, audio_bitrate);
           if (ret < 0)
               return ret;

           av_dump_format(outctx, 0, full_filename, 1);

           if (!(outctx->oformat->flags & AVFMT_NOFILE))
           {
               if (avio_open(&outctx->pb, full_filename, AVIO_FLAG_WRITE) < 0)
               {
                   free(full_filename);
                   return ERROR_OPENING_FILE;
               }
           }

           free(full_filename);

           ret = avformat_write_header(outctx, NULL);
           if (ret < 0)
               return ERROR_WRITING_HEADER;

           return 0;
       }

       int write_interleaved_audio_frame(float_t *aud_sample)
       {
           int ret;

           aud_frame->data[0] = (uint8_t*)aud_sample;
           aud_frame->extended_data[0] = (uint8_t*)aud_sample;

           aud_frame->pts = aud_frame_counter++;

           ret = avcodec_send_frame(aud_codec_context, aud_frame);

           AVPacket pkt;
           av_init_packet(&pkt);
           pkt.data = NULL;
           pkt.size = 0;

           while (true)
           {
               ret = avcodec_receive_packet(aud_codec_context, &pkt);
               if (!ret)
               {
                   av_packet_rescale_ts(&pkt, aud_codec_context->time_base, audio_st->time_base);

                   pkt.stream_index = audio_st->index;

                   av_interleaved_write_frame(outctx, &pkt);

                   av_packet_unref(&pkt);
               }
               if (ret == AVERROR(EAGAIN))
                   break;
               else if (ret < 0)
                   return ERROR_ENCODING_SAMPLES_RECEIVE;
               else
                   break;
           }

           return ENCODED_AUDIO;
       }

       int write_audio_frame(float_t *aud_sample)
       {
           int ret;
           aud_frame->data[0] = (uint8_t*)aud_sample;
           aud_frame->extended_data[0] = (uint8_t*)aud_sample;

           aud_frame->pts = aud_frame_counter++;

           ret = avcodec_send_frame(aud_codec_context, aud_frame);
           if (ret < 0)
               return ERROR_ENCODING_FRAME_SEND;

           AVPacket pkt;
           av_init_packet(&pkt);
           pkt.data = NULL;
           pkt.size = 0;

           fflush(stdout);

           while (true)
           {
               ret = avcodec_receive_packet(aud_codec_context, &pkt);
               if (!ret)
               {
                   if (pkt.pts != AV_NOPTS_VALUE)
                       pkt.pts = av_rescale_q(pkt.pts, aud_codec_context->time_base, audio_st->time_base);
                   if (pkt.dts != AV_NOPTS_VALUE)
                       pkt.dts = av_rescale_q(pkt.dts, aud_codec_context->time_base, audio_st->time_base);

                   av_write_frame(outctx, &pkt);
                   av_packet_unref(&pkt);
               }
               if (ret == AVERROR(EAGAIN))
                   break;
               else if (ret < 0)
                   return ERROR_ENCODING_FRAME_RECEIVE;
               else
                   break;
           }

           return ENCODED_AUDIO;
       }

       int finish_audio_encoding()
       {
           AVPacket pkt;
           av_init_packet(&pkt);
           pkt.data = NULL;
           pkt.size = 0;

           fflush(stdout);

           int ret = avcodec_send_frame(aud_codec_context, NULL);
           if (ret < 0)
               return ERROR_ENCODING_FRAME_SEND;

           while (true)
           {
               ret = avcodec_receive_packet(aud_codec_context, &pkt);
               if (!ret)
               {
                   if (pkt.pts != AV_NOPTS_VALUE)
                       pkt.pts = av_rescale_q(pkt.pts, aud_codec_context->time_base, audio_st->time_base);
                   if (pkt.dts != AV_NOPTS_VALUE)
                       pkt.dts = av_rescale_q(pkt.dts, aud_codec_context->time_base, audio_st->time_base);

                   av_write_frame(outctx, &pkt);
                   av_packet_unref(&pkt);
               }
               if (ret == AVERROR_EOF)
                   break;
               else if (ret < 0)
                   return ERROR_ENCODING_FRAME_RECEIVE;
           }

           av_write_trailer(outctx);

           return 0;
       }

       void cleanup()
       {
           if (aud_frame)
           {
               av_frame_free(&aud_frame);
           }
           if (outctx)
           {
               for (int i = 0; i < outctx->nb_streams; i++)
                   av_freep(&outctx->streams[i]);

               avio_close(outctx->pb);
               av_free(outctx);
           }

           if (aud_codec_context)
           {
               avcodec_close(aud_codec_context);
               av_free(aud_codec_context);
           }
       }

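       // Generates nb_samples of an interleaved 440 Hz sine tone, copied to
       // every channel; *t carries the phase forward between calls.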
       void fill_samples(float_t *dst, int nb_samples, int nb_channels, int sample_rate, float_t *t)
       {
           int i, j;
           float_t tincr = 1.0 / sample_rate;
           const float_t c = 2 * M_PI * 440.0;

           for (i = 0; i < nb_samples; i++) {
               *dst = sin(c * *t);
               for (j = 1; j < nb_channels; j++)
                   dst[j] = dst[0];
               dst += nb_channels;
               *t += tincr;
           }
       }

       int main()
       {
           int sec = 5;
           int frame_rate = 30;
           float t = 0, tincr = 0, tincr2 = 0;

           int src_samples_linesize;
           int src_nb_samples = 960;
           int src_channels = 2;
           int sample_rate = 48000;

           uint8_t **src_data = NULL;

           int ret;

           initialize_audio_only_encoding(48000, src_nb_samples, 192000, "sound_FLT_960");

           ret = av_samples_alloc_array_and_samples(&src_data, &src_samples_linesize, src_channels,
               src_nb_samples, AV_SAMPLE_FMT_FLT, 0);

           for (size_t i = 0; i < frame_rate * sec; i++)
           {
                   fill_samples((float *)src_data[0], src_nb_samples, src_channels, sample_rate, &t);
                   write_interleaved_audio_frame((float *)src_data[0]);
           }

           finish_audio_encoding();

           cleanup();

           return 0;
       }
    }

    And some of the files :

    The webm audio file that does not work (only in VLC) :
    https://drive.google.com/file/d/0B16rIXjPXJCqcU5HVllIYW1iODg/view?usp=sharing

    The ogg audio file that works :
    https://drive.google.com/file/d/0B16rIXjPXJCqMUZhbW0tTDFjT1E/view?usp=sharing

    Video and Audio file that only works in VLC : https://drive.google.com/file/d/0B16rIXjPXJCqX3pEN3B0QVlrekU/view?usp=sharing

    If I play the ogg file in FFPlay it says "aq= 30kb", but if I play the webm audio-only file I get "aq= 0kb". So that does not seem right either.

    Any ideas? Thanks in advance!

    Edit: So I made it work by just encoding both VP8 and Opus into the ogg container and then simply renaming it to .webm and uploading it to YouTube. I did not actually know ogg could have video inside it. I do not really know how that affects the encoding and so on... I can upload the original ogg file with video and it also works on YouTube. But the whole reason I went for webm was its licensing (https://www.webmproject.org/license/)... So I'm a bit confused now.

    I need to read up on what exactly a "container" means in this context and what just changing the extension means.
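
    As a side note: a container only wraps already-encoded streams, so renaming the file does not change the bytes inside, and players that sniff the actual format will still see Ogg. The usual way to move streams between containers without re-encoding is a remux, e.g. with the ffmpeg command line (filenames are placeholders):

    ffmpeg -i input.ogg -c copy output.webm

    Here -c copy copies the VP8 and Opus streams unchanged into a genuine WebM/Matroska container instead of transcoding them.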

    Any comments to shed some light on this are appreciated!

  • Scale filter crashes with error when used from transcoding example

    27 June 2017, by Vali

    I've modified this code example a bit (just so it compiles as C++):
    https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/transcoding.c.

    What works: as is (the null filter), and a number of other filters like framerate, drawtext, ...

    What doesn't work: the scale filter when scaling down.

    I use the following syntax for scale (I've tried many others as well, same effect):
    "scale=w=iw/2:-1"

    The error is: "Input picture width (240) is greater than stride (128)", where the values for width and stride depend on the input.

    Misc environment info: Windows, VS 2017; input example: rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov

    Any clue as to what I'm doing wrong?

    Thanks !


    EDITED to add working code sample


    #pragma comment(lib, "avcodec.lib")
    #pragma comment(lib, "avutil.lib")
    #pragma comment(lib, "avformat.lib")
    #pragma comment(lib, "avfilter.lib")

    /*
    * Copyright (c) 2010 Nicolas George
    * Copyright (c) 2011 Stefano Sabatini
    * Copyright (c) 2014 Andrey Utkin
    *
    **** EDITED 2017 for testing (see original here: https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/transcoding.c)
    *
    * Permission is hereby granted, free of charge, to any person obtaining a copy
    * of this software and associated documentation files (the "Software"), to deal
    * in the Software without restriction, including without limitation the rights
    * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    * copies of the Software, and to permit persons to whom the Software is
    * furnished to do so, subject to the following conditions:
    *
    * The above copyright notice and this permission notice shall be included in
    * all copies or substantial portions of the Software.
    *
    * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    * THE SOFTWARE.
    */

    /**
    * @file
    * API example for demuxing, decoding, filtering, encoding and muxing
    * @example transcoding.c
    */

    extern "C"
    {
       #include <libavcodec/avcodec.h>
       #include <libavformat/avformat.h>
       #include <libavfilter/avfiltergraph.h>
       #include <libavfilter/buffersink.h>
       #include <libavfilter/buffersrc.h>
       #include <libavutil/opt.h>
       #include <libavutil/pixdesc.h>
    }


    static AVFormatContext *ifmt_ctx;
    static AVFormatContext *ofmt_ctx;
    typedef struct FilteringContext {
       AVFilterContext *buffersink_ctx;
       AVFilterContext *buffersrc_ctx;
       AVFilterGraph *filter_graph;
    } FilteringContext;
    static FilteringContext *filter_ctx;

    typedef struct StreamContext {
       AVCodecContext *dec_ctx;
       AVCodecContext *enc_ctx;
    } StreamContext;
    static StreamContext *stream_ctx;

     static int open_input_file(const char *filename, int& videoStreamIndex)
    {
       int ret;
       unsigned int i;

       ifmt_ctx = NULL;
        if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
           return ret;
       }

        if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
           return ret;
       }

       // Just need video
       videoStreamIndex = -1;
        for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++)
       {
           if (ifmt_ctx->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
               continue;
           videoStreamIndex = i;
           break;
       }
        if (videoStreamIndex < 0)
       {
           av_log(NULL, AV_LOG_ERROR, "Cannot find video stream\n");
           return videoStreamIndex;
       }


       stream_ctx = (StreamContext*)av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
       if (!stream_ctx)
           return AVERROR(ENOMEM);

        for (i = 0; i < ifmt_ctx->nb_streams; i++) {

           // Just need video
           if (i != videoStreamIndex)
               continue;


           AVStream *stream = ifmt_ctx->streams[i];
           AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
           AVCodecContext *codec_ctx;
           if (!dec) {
               av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
               return AVERROR_DECODER_NOT_FOUND;
           }
           codec_ctx = avcodec_alloc_context3(dec);
           if (!codec_ctx) {
               av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
               return AVERROR(ENOMEM);
           }
           ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                   "for stream #%u\n", i);
               return ret;
           }
            /* Reencode video & audio and remux subtitles etc. */
           if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
               || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
               if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                   codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
               /* Open decoder */
               ret = avcodec_open2(codec_ctx, dec, NULL);
                if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                   return ret;
               }
           }
           stream_ctx[i].dec_ctx = codec_ctx;
       }

       av_dump_format(ifmt_ctx, 0, filename, 0);
       return 0;
    }

    static int open_output_file(const char *filename, const int videoStreamIndex)
    {
       AVStream *out_stream;
       AVStream *in_stream;
       AVCodecContext *dec_ctx, *enc_ctx;
       AVCodec *encoder;
       int ret;
       unsigned int i;

       ofmt_ctx = NULL;
        avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
       if (!ofmt_ctx) {
           av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
           return AVERROR_UNKNOWN;
       }


        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           // Just need video
           if (i != videoStreamIndex)
               continue;

           out_stream = avformat_new_stream(ofmt_ctx, NULL);
           if (!out_stream) {
               av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
               return AVERROR_UNKNOWN;
           }

           in_stream = ifmt_ctx->streams[i];
           dec_ctx = stream_ctx[i].dec_ctx;

           if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
               /* in this example, we choose transcoding to same codec */
               encoder = avcodec_find_encoder(dec_ctx->codec_id);
               if (!encoder) {
                   av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                   return AVERROR_INVALIDDATA;
               }
               enc_ctx = avcodec_alloc_context3(encoder);
               if (!enc_ctx) {
                   av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                   return AVERROR(ENOMEM);
               }

               /* In this example, we transcode to same properties (picture size,
               * sample rate etc.). These properties can be changed for output
               * streams easily using filters */
               enc_ctx->height = dec_ctx->height;
               enc_ctx->width = dec_ctx->width;
               enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
               /* take first format from list of supported formats */
               if (encoder->pix_fmts)
                   enc_ctx->pix_fmt = encoder->pix_fmts[0];
               else
                   enc_ctx->pix_fmt = dec_ctx->pix_fmt;

               /* video time_base can be set to whatever is handy and supported by encoder */
               //enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
               enc_ctx->time_base = dec_ctx->time_base;


               /* Third parameter can be used to pass settings to encoder */
               ret = avcodec_open2(enc_ctx, encoder, NULL);
                if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                   return ret;
               }
               ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
                if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                   return ret;
               }
                if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                   enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

               out_stream->time_base = enc_ctx->time_base;
               stream_ctx[i].enc_ctx = enc_ctx;
           }
           else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
               av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
               return AVERROR_INVALIDDATA;
           }
           else {
               /* if this stream must be remuxed */
               ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
                if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                   return ret;
               }
               out_stream->time_base = in_stream->time_base;
           }

       }
       av_dump_format(ofmt_ctx, 0, filename, 1);

        if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
               return ret;
           }
       }

       /* init muxer, write output file header */
       ret = avformat_write_header(ofmt_ctx, NULL);
        if (ret < 0) {
           av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
           return ret;
       }

       return 0;
    }

    static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
       AVCodecContext *enc_ctx, const char *filter_spec)
    {
       char args[512];
       int ret = 0;
       AVFilter *buffersrc = NULL;
       AVFilter *buffersink = NULL;
       AVFilterContext *buffersrc_ctx = NULL;
       AVFilterContext *buffersink_ctx = NULL;
       AVFilterInOut *outputs = avfilter_inout_alloc();
       AVFilterInOut *inputs = avfilter_inout_alloc();
       AVFilterGraph *filter_graph = avfilter_graph_alloc();

       if (!outputs || !inputs || !filter_graph) {
           ret = AVERROR(ENOMEM);
           goto end;
       }

       if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
           buffersrc = avfilter_get_by_name("buffer");
           buffersink = avfilter_get_by_name("buffersink");
           if (!buffersrc || !buffersink) {
               av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
               ret = AVERROR_UNKNOWN;
               goto end;
           }

           snprintf(args, sizeof(args),
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
               dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
               dec_ctx->time_base.num, dec_ctx->time_base.den,
               dec_ctx->sample_aspect_ratio.num,
               dec_ctx->sample_aspect_ratio.den);

            ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
               goto end;
           }

            ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
               goto end;
           }

            ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
               goto end;
           }
       }
       else {
           ret = AVERROR_UNKNOWN;
           goto end;
       }

       /* Endpoints for the filter graph. */
       outputs->name = av_strdup("in");
       outputs->filter_ctx = buffersrc_ctx;
       outputs->pad_idx = 0;
       outputs->next = NULL;

       inputs->name = av_strdup("out");
       inputs->filter_ctx = buffersink_ctx;
       inputs->pad_idx = 0;
       inputs->next = NULL;

       if (!outputs->name || !inputs->name) {
           ret = AVERROR(ENOMEM);
           goto end;
       }

        if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
            &inputs, &outputs, NULL)) < 0)
            goto end;

        if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
           goto end;

       /* Fill FilteringContext */
       fctx->buffersrc_ctx = buffersrc_ctx;
       fctx->buffersink_ctx = buffersink_ctx;
       fctx->filter_graph = filter_graph;

    end:
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);

       return ret;
    }

    static int init_filters(const int videoStreamIndex)
    {
       const char *filter_spec;
       unsigned int i;
       int ret;
       filter_ctx = (FilteringContext*)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
       if (!filter_ctx)
           return AVERROR(ENOMEM);

        for (i = 0; i < ifmt_ctx->nb_streams; i++) {

           // Just video
           if (i != videoStreamIndex)
               continue;

           filter_ctx[i].buffersrc_ctx = NULL;
           filter_ctx[i].buffersink_ctx = NULL;
           filter_ctx[i].filter_graph = NULL;
           if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
               || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
               continue;

           filter_spec = "null"; /* passthrough (dummy) filter for video */
           //filter_spec = "scale=w=iw/2:-1";
           // filter_spec = "drawtext=fontfile=FreeSerif.ttf: text='%{localtime}': x=w-text_w: y=0: fontsize=24: fontcolor=yellow@1.0: box=1: boxcolor=red@1.0";
           // filter_spec = "drawtext=fontfile=FreeSerif.ttf :text='test': x=w-text_w: y=text_h: fontsize=24: fontcolor=yellow@1.0: box=1: boxcolor=red@1.0";

            ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
               stream_ctx[i].enc_ctx, filter_spec);
           if (ret)
               return ret;
       }
       return 0;
    }

    static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame, const int videoStreamIndex) {

       // Just video
       if (stream_index != videoStreamIndex)
           return 0;

       int ret;
       int got_frame_local;
       AVPacket enc_pkt;
       int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
           (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
               AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

       if (!got_frame)
            got_frame = &got_frame_local;

       // av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
       /* encode filtered frame */
       enc_pkt.data = NULL;
       enc_pkt.size = 0;
        av_init_packet(&enc_pkt);

        ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
            filt_frame, got_frame);

        av_frame_free(&filt_frame);
        if (ret < 0)
           return ret;
       if (!(*got_frame))
           return 0;

       /* prepare packet for muxing */
       /*enc_pkt.stream_index = stream_index;
        av_packet_rescale_ts(&enc_pkt, stream_ctx[stream_index].enc_ctx->time_base, ofmt_ctx->streams[stream_index]->time_base);*/
        enc_pkt.stream_index = 0;
        av_packet_rescale_ts(&enc_pkt, stream_ctx[stream_index].enc_ctx->time_base, ofmt_ctx->streams[0]->time_base);

       av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
       /* mux encoded frame */
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
       return ret;
    }

    static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index, const int videoStreamIndex)
    {
       // Just video, all else crashes
       if (stream_index != videoStreamIndex)
           return 0;

       int ret;
       AVFrame *filt_frame;

       // av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
       /* push the decoded frame into the filtergraph */
       ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
           frame, 0);
        if (ret < 0) {
           av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
           return ret;
       }

       /* pull filtered frames from the filtergraph */
       while (1) {
           filt_frame = av_frame_alloc();
           if (!filt_frame) {
               ret = AVERROR(ENOMEM);
               break;
           }
           // av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
           ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
               filt_frame);
            if (ret < 0) {
               /* if no more frames for output - returns AVERROR(EAGAIN)
               * if flushed and no more frames for output - returns AVERROR_EOF
               * rewrite retcode to 0 to show it as normal procedure completion
               */
               if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                   ret = 0;
                av_frame_free(&filt_frame);
               break;
           }

           filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
           ret = encode_write_frame(filt_frame, stream_index, NULL, videoStreamIndex);
            if (ret < 0)
               break;
       }

       return ret;
    }

    static int flush_encoder(unsigned int stream_index, const int videoStreamIndex)
    {
       int ret;
       int got_frame;

       // Just video
       if (stream_index != videoStreamIndex)
           return 0;

        if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
           AV_CODEC_CAP_DELAY))
           return 0;

       while (1) {
           av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
            ret = encode_write_frame(NULL, stream_index, &got_frame, videoStreamIndex);
            if (ret < 0)
               break;
           if (!got_frame)
               return 0;
       }
       return ret;
    }


    #include <vector>

    int main(int argc, char **argv)
    {
       int ret;

       AVPacket packet;
       packet.data = NULL;
       packet.size = 0;

       AVFrame *frame = NULL;
       enum AVMediaType type;
       unsigned int stream_index;
       unsigned int i;
       int got_frame;
       int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);


    #ifdef _DEBUG
       // Hardcoded arguments
        std::vector<char*> varguments;
       {
           varguments.push_back(argv[0]);

           // Source
           varguments.push_back("./big_buck_bunny_short.mp4 ");

           // Destination
           varguments.push_back("./big_buck_bunny_short-processed.mp4");
       }

       char** arguments = new char*[varguments.size()];
        for (unsigned int i = 0; i < varguments.size(); i++)
       {
           arguments[i] = varguments[i];
       }
       argc = varguments.size();
       argv = arguments;
    #endif // _DEBUG


       if (argc != 3) {
           av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file="file" /> <output file="file">\n", argv[0]);
           return 1;
       }

       av_register_all();
       avfilter_register_all();

       int videoStreamIndex = -1;
        if ((ret = open_input_file(argv[1], videoStreamIndex)) < 0)
            goto end;
        if ((ret = open_output_file(argv[2], videoStreamIndex)) < 0)
            goto end;
        if ((ret = init_filters(videoStreamIndex)) < 0)
           goto end;

       // Stop after a couple of frames
       int framesToGet = 100;

       /* read all packets */
       //while (framesToGet--)
       while(1)
       {
            if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
               break;
           stream_index = packet.stream_index;

           // I just need video
           if (stream_index != videoStreamIndex) {
                av_packet_unref(&packet);
               continue;
           }

           type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
           av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
               stream_index);

           if (filter_ctx[stream_index].filter_graph) {
               av_log(NULL, AV_LOG_DEBUG, "Going to reencode&amp;filter the frame\n");
               frame = av_frame_alloc();
               if (!frame) {
                   ret = AVERROR(ENOMEM);
                   break;
               }
                av_packet_rescale_ts(&packet,
                    ifmt_ctx->streams[stream_index]->time_base,
                    stream_ctx[stream_index].dec_ctx->time_base);
                dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                    avcodec_decode_audio4;
                ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                    &got_frame, &packet);
                if (ret < 0) {
                    av_frame_free(&frame);
                   av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                   break;
               }

               if (got_frame) {
                   frame->pts = frame->best_effort_timestamp;
                   ret = filter_encode_write_frame(frame, stream_index, videoStreamIndex);
                    av_frame_free(&frame);
                    if (ret < 0)
                       goto end;
               }
               else {
                    av_frame_free(&frame);
               }
           }
           else {
               /* remux this frame without reencoding */
                av_packet_rescale_ts(&packet,
                   ifmt_ctx->streams[stream_index]->time_base,
                   ofmt_ctx->streams[stream_index]->time_base);

                ret = av_interleaved_write_frame(ofmt_ctx, &packet);
                if (ret < 0)
                   goto end;
           }
            av_packet_unref(&packet);
       }

       /* flush filters and encoders */
        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           /* flush filter */
           if (!filter_ctx[i].filter_graph)
               continue;
           ret = filter_encode_write_frame(NULL, i, videoStreamIndex);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
               goto end;
           }

           /* flush encoder */
           ret = flush_encoder(i, videoStreamIndex);
            if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
               goto end;
           }
       }

       av_write_trailer(ofmt_ctx);
    end:
        av_packet_unref(&packet);
        av_frame_free(&frame);
        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           // Just video
           if (i != videoStreamIndex)
               continue;
            avcodec_free_context(&stream_ctx[i].dec_ctx);
            if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
                avcodec_free_context(&stream_ctx[i].enc_ctx);
            if (filter_ctx && filter_ctx[i].filter_graph)
                avfilter_graph_free(&filter_ctx[i].filter_graph);
       }
       av_free(filter_ctx);
       av_free(stream_ctx);
        avformat_close_input(&ifmt_ctx);
        if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
            avio_closep(&ofmt_ctx->pb);
       avformat_free_context(ofmt_ctx);

        /*if (ret < 0)
           av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));*/

       return ret ? 1 : 0;
    }
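
     A hedged side note on the stride error: in this example the encoder is opened in open_output_file with the decoder's width/height, while the scale filter later emits smaller frames, so the encoder's expected picture layout no longer matches what the filtergraph delivers. One possible sketch, assuming the FFmpeg 3.x buffersink accessors av_buffersink_get_w/av_buffersink_get_h, would be to configure the filter graph first and only then size and open the encoder from the sink:

     // Hypothetical reordering: after avfilter_graph_config() succeeds, take
     // the post-scale dimensions from the buffersink and open the encoder
     // with those instead of the decoder's original width/height.
     enc_ctx->width  = av_buffersink_get_w(filter_ctx[i].buffersink_ctx);
     enc_ctx->height = av_buffersink_get_h(filter_ctx[i].buffersink_ctx);
     ret = avcodec_open2(enc_ctx, encoder, NULL);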