Newest 'ffmpeg' Questions - Stack Overflow

http://stackoverflow.com/questions/tagged/ffmpeg

Articles published on the site

  • How to reconnect using avformat_open_input without having to allocate the decoder again?

    26 September 2012, by Jona

    Currently, I have code based on ffplay to stream live content.

    One thing I'm looking to do is to be able to reconnect after losing the connection, without having to shut down the whole decoding process.

    To me the solution is to allocate the decoder myself once and keep using it across reconnections. I can't seem to figure out how to set up a decoder without depending on the AVFormatContext, though. Right now my code fails when I try to decode with my own allocated AVCodecContext, but it doesn't fail if I use the AVCodecContext given by the AVFormatContext. (A rough sketch of the reuse I have in mind follows the open_decoder code below.)

    This is part of my initial code:

    // attempt to find more information about the codec
    // also it will open the codecs needed.
    fferror = avformat_find_stream_info(ic, NULL);
    if (0 > fferror)
    {
        // TODO verify type of error to better map it to our errors
        error = ERROR_FAIL_TO_CONNECT;
        LOGE("download() -> avformat_find_stream_info failed! fferror:%d, error:%d", fferror, error);
        goto fail;
    }
    
    AVCodec *dec;
    // select the audio stream
    int ret = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (0 > ret) {
        error = ERROR_UNEXPECTED_ERROR;
        LOGE("download() -> av_find_best_stream failed! ret:%d, error:%d", ret, error);
        goto fail;
    }
    
    LOGI("download() -> STREAM: nb_streams:%d", ic->nb_streams);
    LOGI("download() -> STREAM: audio format:%s", ic->iformat->name);
    LOGI("download() -> STREAM: audio bitrate:%d", ic->bit_rate);
    
    
    // save the audio stream index and source
    is->audio_stream_index = ret;
    is->audio_st = ic->streams[is->audio_stream_index];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;
    
    is->audio_st->discard = AVDISCARD_DEFAULT;
    
    if(ic->pb) {
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
    }
    
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }
    
    // open codec
    error = open_decoder(is->audio_st->codec, dec);
    if (ERROR_NO_ERROR != error) {
        LOGE("receive_thread() -> open_decoder failed! error:%d", error);
        goto fail;
    }
    

    And this is the function that initializes the decoder.

    static int open_decoder (AVCodecContext *avctx, AVCodec *codec)
    {
        int fferror         = 0;
        AVCodecContext *c   = NULL;
    
        if (smDecoder.open) {
            LOGW("open_decoder() -> decoder is already open!");
            return ERROR_NO_ERROR;
        }
    
        // find the decoder
        if (!codec)
        {
            codec = avcodec_find_decoder(avctx->codec_id);
            if (!codec)
            {
                LOGE("open_decoder() -> avcodec_find_decoder failed!");
                return ERROR_UNEXPECTED_ERROR;
            }
        }
    
        // allocate the decoder av context
        c = avcodec_alloc_context3(codec);
        if (NULL == c) {
            LOGE("open_decoder() -> avcodec_alloc_context3 failed! Out of memory?");
            return ERROR_UNEXPECTED_ERROR;
        }
    
        // check that this is an audio codec, the only type we support
        if (AVMEDIA_TYPE_AUDIO != c->codec_type)
        {
            LOGE("open_decoder() -> codec_type not supported! codec_type:%d",c->codec_type);
            return ERROR_UNEXPECTED_ERROR;
        }
    
        // set the proper channels if not properly set
        if (c->channels > 0) {
            c->request_channels = FFMIN(2, c->channels);
        } else {
            c->request_channels = 2;
        }
    
        c->debug_mv = 0;
        c->debug = 0;
        c->workaround_bugs = workaround_bugs;
        c->idct_algo= idct;
        if(fast) c->flags2 |= CODEC_FLAG2_FAST;
        c->error_concealment= error_concealment;
        c->thread_count = thread_count;
    
        // open the decoder
        fferror = avcodec_open2(avctx, codec, NULL);
        if (fferror < 0)
        {
            LOGE("open_decoder() -> avcodec_open2 failed! fferror:%d", fferror);
            return ERROR_UNEXPECTED_ERROR;
        }
    
        // clean up our reusable packet
        memset(&smDecoder.audio_pkt, 0, sizeof(smDecoder.audio_pkt));
    
        smDecoder.open = 1;
        smDecoder.codec = codec;
        smDecoder.avctx = c;
    
        return ERROR_NO_ERROR;
    }
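
    This is roughly the kind of reuse I'm after. It is only a sketch of the idea, not working code: it assumes avcodec_copy_context() is available in this libavcodec version, and reuse_decoder / persistent_ctx are illustrative names rather than anything from my real project.

    /*
     * Sketch only: keep one AVCodecContext alive across reconnects and
     * re-initialize it from the freshly opened AVFormatContext after each
     * avformat_open_input() / avformat_find_stream_info().
     */
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>

    static AVCodecContext *persistent_ctx  = NULL;  /* survives reconnections */
    static int             persistent_open = 0;

    static int reuse_decoder(AVFormatContext *ic, int audio_index)
    {
        AVCodecContext *src = ic->streams[audio_index]->codec;
        AVCodec *codec      = avcodec_find_decoder(src->codec_id);
        int err;

        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;

        if (!persistent_ctx) {
            persistent_ctx = avcodec_alloc_context3(codec);
            if (!persistent_ctx)
                return AVERROR(ENOMEM);
        } else if (persistent_open) {
            avcodec_close(persistent_ctx);  /* keep the allocation, drop the old state */
            persistent_open = 0;
        }

        /* pull sample rate, channels, extradata, ... from the new stream */
        err = avcodec_copy_context(persistent_ctx, src);
        if (err < 0)
            return err;

        err = avcodec_open2(persistent_ctx, codec, NULL);
        if (err >= 0)
            persistent_open = 1;
        return err;
    }

    The idea is that persistent_ctx is never freed on a disconnect; only the AVFormatContext gets torn down and reopened.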
    
  • ffmpeg result to a tempfile

    26 September 2012, by user1165201

    I am new to Python and ffmpeg. I have the following question to ask.

    If I run the following command from the command line, it works.

    ffmpeg -i  1.flv  temp_filename
    

    If I put it in a program:

    import subprocess
    import tempfile

    temp_file_handle, temp_filename = tempfile.mkstemp('.flv')

    command = "ffmpeg -i " + newvideo.location + " " + temp_filename

    out = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    res = out.communicate()
    

    The generated video isn't written to temp_filename. Why?

  • ffmpeg keyframe extraction

    26 September 2012, by Shikhar Shrivastav

    I have been trying to extract keyframes from a video using ffmpeg 0.11.1. So far, all the commands I have tried do not extract just the keyframes but return all the frames, i.e. 25 fps * total time frames in the output. I tried setting keyint_min to 25 to make sure there is a maximum of 1 keyframe per second.

    ffmpeg -vf select="eq(pict_type\,PICT_TYPE_I)" -g 250 -keyint_min 25 -i C:\test.mp4 -vsync 2 -f image2 C:\testTemp\thumbnails-%02d.jpeg
    

    But still all the frames are returned.

    Then I tried to separate the keyframes by 20 seconds.

    ffmpeg -i C:\test.mp4 -vf select='eq(pict_type\,I)*(isnan(prev_selected_t)+gte(t-prev_selected_t\,20))' -vsync 0 -f image2 C:\testTemp\%09d.jpg
    

    Again the same result: all the frames are returned.

    What should I do?
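
    For reference, here is a rough sketch (not the CLI fix I'm looking for; the names and structure are only illustrative, and it assumes the 2012-era av_read_frame() / av_free_packet() calls) of counting the packets flagged as keyframes through the libavformat API, which at least tells me how many images the select filter should be producing.

    /* Sketch only: count packets flagged as keyframes in the input file. */
    #include <stdio.h>
    #include <libavformat/avformat.h>

    int main(int argc, char **argv)
    {
        AVFormatContext *ic = NULL;
        AVPacket pkt;
        int video_stream, keyframes = 0;

        if (argc < 2)
            return 1;

        av_register_all();

        if (avformat_open_input(&ic, argv[1], NULL, NULL) < 0 ||
            avformat_find_stream_info(ic, NULL) < 0)
            return 1;

        video_stream = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
        if (video_stream < 0)
            return 1;

        while (av_read_frame(ic, &pkt) >= 0) {
            if (pkt.stream_index == video_stream && (pkt.flags & AV_PKT_FLAG_KEY))
                keyframes++;
            av_free_packet(&pkt);
        }

        printf("keyframe packets: %d\n", keyframes);
        avformat_close_input(&ic);
        return 0;
    }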

  • Encode image to video using ffmpeg (sws_scale)

    26 September 2012, by bahar_p

    I'm trying to encode an image to video using the ffmpeg library. I have these global params:

    //Global params
    AVCodec         *codec;
    AVCodecContext  *codecCtx;
    uint8_t         *output_buffer;
    int             output_buffer_size;
    

    I divided the encoding into 3 methods. Initializing the encoder:

    jint Java_com_camera_simpledoublewebcams2_CameraPreview_initencoder(JNIEnv* env, jobject thiz){
        avcodec_register_all();
        avcodec_init();
        av_register_all();

        int fps = 30;

        /* find the H263 video encoder */
        codec = avcodec_find_encoder(CODEC_ID_H263);
        if (!codec) {
            LOGI("avcodec_find_encoder() run fail.");
            return -5;
        }

        //allocate context
        codecCtx = avcodec_alloc_context();

        /* put sample parameters */
        codecCtx->bit_rate = 400000;
        /* resolution must be a multiple of two */
        codecCtx->width = 176;
        codecCtx->height = 144;
        /* frames per second */
        codecCtx->time_base = (AVRational){1,fps};
        codecCtx->pix_fmt = PIX_FMT_YUV420P;
        codecCtx->codec_id = CODEC_ID_H263;
        codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;

        /* open it */
        if (avcodec_open(codecCtx, codec) < 0) {
            LOGI("avcodec_open() run fail.");
            return -10;
        }

        //init buffer
        output_buffer_size = 500000;
        output_buffer = malloc(output_buffer_size);

        return 0;
    }

    Encoding the image:

    jint Java_com_camera_simpledoublewebcams2_CameraPreview_encodejpeg(JNIEnv* env, jobject thiz, jchar* cImage, jint imageSize){
        int             out_size;
        AVFrame         *picture;
        AVFrame         *outpic;
        uint8_t         *outbuffer;

        //allocate frame
        picture = avcodec_alloc_frame();
        outpic = avcodec_alloc_frame();

        int nbytes = avpicture_get_size(PIX_FMT_YUV420P, codecCtx->width, codecCtx->height);
        outbuffer = (uint8_t*)av_malloc(nbytes);
        outpic->pts = 0;

        //fill picture with image
        avpicture_fill((AVPicture*)picture, (uint8_t*)cImage, PIX_FMT_RGBA, codecCtx->width, codecCtx->height);
        //fill outpic with empty image
        avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, codecCtx->width, codecCtx->height);

        //rescale the image
        struct SwsContext* fooContext = sws_getContext(codecCtx->width, codecCtx->height,
                                                       PIX_FMT_RGBA,
                                                       codecCtx->width, codecCtx->height,
                                                       PIX_FMT_YUV420P,
                                                       SWS_FAST_BILINEAR, NULL, NULL, NULL);
        sws_scale(fooContext, picture->data, picture->linesize, 0, codecCtx->height, outpic->data, outpic->linesize);

        //encode the image
        out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
        out_size += avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);

        //release pictures
        av_free(outbuffer);
        av_free(picture);
        av_free(outpic);

        return out_size;
    }

    And closing the encoder:

    void Java_com_camera_simpledoublewebcams2_CameraPreview_closeencoder(JNIEnv* env, jobject thiz){
        free(output_buffer);
        avcodec_close(codecCtx);
        av_free(codecCtx);
    }

    When I send the first image, I get a result from the encoder. When I try to send another image, the program crashes. I tried calling init once, then sending the images, then calling close; it didn't work. I tried calling init and close for every image; that didn't work either.

    Any suggestions?

    Thanks!

    EDIT: After further research I found that the problem is in the sws_scale method. I still don't know what is causing this issue...
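
    For reference, this is roughly how I understand the RGBA to YUV420P conversion with sws_scale is supposed to look (a sketch only, not my actual code; rgba_to_yuv420p and the reuse of a single SwsContext are just illustration), including the sws_freeContext() call that my code above never makes:

    /*
     * Sketch only: convert an RGBA buffer to YUV420P with sws_scale, creating
     * the SwsContext once and releasing it with sws_freeContext() when done.
     */
    #include <libavcodec/avcodec.h>
    #include <libswscale/swscale.h>

    static struct SwsContext *sws = NULL;

    static int rgba_to_yuv420p(const uint8_t *rgba, int w, int h, AVFrame *out)
    {
        /* RGBA is packed, 4 bytes per pixel, single plane */
        const uint8_t *src_data[4]     = { rgba, NULL, NULL, NULL };
        int            src_linesize[4] = { 4 * w, 0, 0, 0 };

        if (!sws) {
            sws = sws_getContext(w, h, PIX_FMT_RGBA,
                                 w, h, PIX_FMT_YUV420P,
                                 SWS_FAST_BILINEAR, NULL, NULL, NULL);
            if (!sws)
                return -1;
        }

        /* 'out' must already be backed by a YUV420P buffer (avpicture_fill) */
        return sws_scale(sws, src_data, src_linesize, 0, h, out->data, out->linesize);
    }

    static void release_sws(void)
    {
        if (sws) {
            sws_freeContext(sws);
            sws = NULL;
        }
    }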

  • Compiling ffmpeg for use in Android (ndk) for x86 architecture

    26 September 2012, by Ankur22

    I am trying to compile ffmpeg for Android, but for the x86 architecture. I've successfully compiled an ARM .so and it all works, and I thought that replacing arm with x86 in the relevant places within the build script would do the trick. Unfortunately that's not the case, and I get some strange things occurring.

    First I get this while compiling ffmpeg:

    /home/ankur/android-ndk-r8/platforms/android-8/arch-arm//usr/include/strings.h:49: warning: redundant redeclaration of 'index'
    

    Notice the arch-arm instead of arch-x86. Finally I get this:

    /home/ankur/android-ndk-r8/toolchains/x86-4.4.3/prebuilt/linux-x86/bin/i686-android-linux-ld: libavcodec/libavcodec.a(4xm.o): Relocations in generic ELF (EM: 40)
    /home/ankur/android-ndk-r8/toolchains/x86-4.4.3/prebuilt/linux-x86/bin/i686-android-linux-ld: libavcodec/libavcodec.a(4xm.o): Relocations in generic ELF (EM: 40)
    /home/ankur/android-ndk-r8/toolchains/x86-4.4.3/prebuilt/linux-x86/bin/i686-android-linux-ld: libavcodec/libavcodec.a(4xm.o): Relocations in generic ELF (EM: 40)
    libavcodec/libavcodec.a(4xm.o): could not read symbols: File in wrong format
    

    The build script I've created looks like this:

    #!/bin/bash
    
    NDK=~/android-ndk-r8
    PLATFORM=$NDK/platforms/android-8/arch-x86/
    PREBUILT=$NDK/toolchains/x86-4.4.3/prebuilt/linux-x86
    function build_one_r8
    {
    ./configure \
        --disable-shared \
        --enable-static \
        --enable-gpl \
        --enable-version3 \
        --enable-nonfree \
        --disable-doc \
        --disable-ffmpeg \
        --disable-ffplay \
        --disable-ffprobe \
        --disable-ffserver \
        --disable-avdevice \
        --disable-avfilter \
        --disable-postproc \
        --enable-small \
        --cross-prefix=$PREBUILT/bin/i686-android-linux- \
        --enable-cross-compile \
        --target-os=linux \
        --extra-cflags="-I$PLATFORM/usr/include" \
        --arch=x86 \
        --disable-symver \
        --disable-debug \
        --disable-stripping \
        $ADDITIONAL_CONFIGURE_FLAG
    sed -i 's/HAVE_LRINT 0/HAVE_LRINT 1/g' config.h
    sed -i 's/HAVE_LRINTF 0/HAVE_LRINTF 1/g' config.h
    sed -i 's/HAVE_ROUND 0/HAVE_ROUND 1/g' config.h
    sed -i 's/HAVE_ROUNDF 0/HAVE_ROUNDF 1/g' config.h
    sed -i 's/HAVE_TRUNC 0/HAVE_TRUNC 1/g' config.h
    sed -i 's/HAVE_TRUNCF 0/HAVE_TRUNCF 1/g' config.h
    make clean
    make  -j4 install
    $PREBUILT/bin/i686-android-linux-ar d libavcodec/libavcodec.a inverse.o
    $PREBUILT/bin/i686-android-linux-ld -rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib  -soname libffmpeg.so -shared -nostdlib  -z,noexecstack -Bsymbolic --whole-archive --no-undefined -o $PREFIX/libffmpeg.so libavcodec/libavcodec.a libavformat/libavformat.a libavutil/libavutil.a libswscale/libswscale.a -lc -lm -lz -ldl -llog  --warn-once  --dynamic-linker=/system/bin/linker $PREBUILT/lib/gcc/i686-android-linux/4.4.3/libgcc.a
    }
    function build_one_r8_2
    {
    $PREBUILT/bin/i686-android-linux-ar d libavcodec/libavcodec.a inverse.o
    $PREBUILT/bin/i686-android-linux-ld -rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib  -soname libffmpeg.so -shared -nostdlib  -z,noexecstack -Bsymbolic --whole-archive --no-undefined -o $PREFIX/libffmpeg.so libavcodec/libavcodec.a libavformat/libavformat.a libavutil/libavutil.a libswscale/libswscale.a -lc -lm -lz -ldl -llog  --warn-once  --dynamic-linker=/system/bin/linker $PREBUILT/lib/gcc/i686-android-linux/4.4.3/libgcc.a
    }
    #x86
    CPU=x86
    OPTIMIZE_CFLAGS="-march=$CPU "
    PREFIX=./android/$CPU
    ADDITIONAL_CONFIGURE_FLAG=
    build_one_r8
    

    I hope I'm doing something wrong rather than this not being possible.

    Thanks!