Advanced search

Media (1)

Keyword: - Tags -/stallman

Other articles (81)

  • Emballe médias: what is it for?

    4 February 2011

    This plugin is designed to manage sites for publishing documents of all types online.
    It creates "media" items, namely: a "media" is an article, in the SPIP sense, created automatically when a document is uploaded, whether it is audio, video, image or text; only a single document can be linked to a "media" article;

  • Managing rights to create and edit objects

    8 February 2011

    By default, many features are restricted to administrators, but each can be configured independently to change the minimum user status required to use it, in particular: writing content on the site, adjustable through the form template management; adding notes to articles; adding captions and annotations to images;

  • Supporting all media types

    13 April 2011

    Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)

On other sites (3541)

  • How to concat mp4 files using libffmpeg in a C program?

    1 August 2013, by chichien

    I know the ffmpeg command line makes this easy, but how do I implement it programmatically? I'm not good at this. Here is some code from the internet that converts .mp4 to .ts; I made some changes, but the audio stream problem persists:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #include "libavformat/avformat.h"
    #include "libavcodec/avcodec.h"
    #include "libavutil/avutil.h"
    #include "libavutil/rational.h"
    #include "libavdevice/avdevice.h"
    #include "libavutil/mathematics.h"
    #include "libswscale/swscale.h"

    static AVStream* add_output_stream(AVFormatContext* output_format_context, AVStream* input_stream)
    {

       AVCodecContext* input_codec_context = NULL;
       AVCodecContext* output_codec_context = NULL;

       AVStream* output_stream = NULL;
       output_stream = av_new_stream(output_format_context, 0);
       if (!output_stream)
       {
           printf("Call av_new_stream function failed\n");
           return NULL;
       }

       input_codec_context = input_stream->codec;
       output_codec_context = output_stream->codec;

       output_codec_context->codec_id = input_codec_context->codec_id;
       output_codec_context->codec_type = input_codec_context->codec_type;
       output_codec_context->codec_tag = input_codec_context->codec_tag;
       output_codec_context->bit_rate = input_codec_context->bit_rate;
       output_codec_context->extradata = input_codec_context->extradata;
       output_codec_context->extradata_size = input_codec_context->extradata_size;

       if (av_q2d(input_codec_context->time_base) * input_codec_context->ticks_per_frame > av_q2d(input_stream->time_base) && av_q2d(input_stream->time_base) < 1.0 / 1000)
       {
           output_codec_context->time_base = input_codec_context->time_base;
           output_codec_context->time_base.num *= input_codec_context->ticks_per_frame;
       }
       else
       {
           output_codec_context->time_base = input_stream->time_base;
       }
       switch (input_codec_context->codec_type)
       {
       case AVMEDIA_TYPE_AUDIO:
           output_codec_context->channel_layout = input_codec_context->channel_layout;
           output_codec_context->sample_rate = input_codec_context->sample_rate;
           output_codec_context->channels = input_codec_context->channels;
           output_codec_context->frame_size = input_codec_context->frame_size;
           if ((input_codec_context->block_align == 1 && input_codec_context->codec_id == CODEC_ID_MP3) || input_codec_context->codec_id == CODEC_ID_AC3)
           {
               output_codec_context->block_align = 0;
           }
           else
           {
               output_codec_context->block_align = input_codec_context->block_align;
           }
           break;
       case AVMEDIA_TYPE_VIDEO:
           output_codec_context->pix_fmt = input_codec_context->pix_fmt;
           output_codec_context->width = input_codec_context->width;
           output_codec_context->height = input_codec_context->height;
           output_codec_context->has_b_frames = input_codec_context->has_b_frames;
           if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
           {
               output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
           }
           break;
       default:
           break;
       }

       return output_stream;
    }

    //[[** from ffmpeg.c
    static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
       int ret;

       while(bsfc){
           AVPacket new_pkt= *pkt;
           int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
                                             &new_pkt.data, &new_pkt.size,
                                             pkt->data, pkt->size,
                                             pkt->flags & AV_PKT_FLAG_KEY);
           if(a>0){
               av_free_packet(pkt);
               new_pkt.destruct= av_destruct_packet;
           } else if(a<0){
               fprintf(stderr, "%s failed for stream %d, codec %s\n",
                       bsfc->filter->name, pkt->stream_index,
                       avctx->codec ? avctx->codec->name : "copy");
               //print_error("", a);
               //if (exit_on_error)
               //    ffmpeg_exit(1);
           }
           *pkt= new_pkt;

           bsfc= bsfc->next;
       }

       ret= av_interleaved_write_frame(s, pkt);
       if(ret < 0){
           //print_error("av_interleaved_write_frame()", ret);
           fprintf(stderr, "av_interleaved_write_frame(%d)\n", ret);
           exit(1);
       }
    }
    //]]**

    int main(int argc, char* argv[])
    {

       const char* input;
       const char* output;
       const char* output_prefix = NULL;
       char* segment_duration_check = 0;
       const char* index = NULL;
       char* tmp_index = NULL;
       const char* http_prefix = NULL;
       long max_tsfiles = NULL;
       double prev_segment_time = 0;
       double segment_duration = 0;

       AVInputFormat* ifmt = NULL;
       AVOutputFormat* ofmt = NULL;
       AVFormatContext* ic = NULL;
       AVFormatContext* oc = NULL;
       AVStream* video_st = NULL;
       AVStream* audio_st = NULL;
       AVCodec* codec = NULL;
       AVDictionary* pAVDictionary = NULL;

       long frame_count = 0;

       if (argc != 3) {
           fprintf(stderr, "Usage: %s inputfile outputfile\n", argv[0]);
           exit(1);
       }

       input = argv[1];
       output = argv[2];

       av_register_all();

       char szError[256] = {0};
       int nRet = avformat_open_input(&ic, input, ifmt, &pAVDictionary);
       if (nRet != 0)
       {
           av_strerror(nRet, szError, 256);
           printf(szError);
           printf("\n");
           printf("Call avformat_open_input function failed!\n");
           return 0;
       }

       if (av_find_stream_info(ic) < 0)
       {
           printf("Call av_find_stream_info function failed!\n");
           return 0;
       }

       ofmt = av_guess_format("mpegts", NULL, NULL);
       if (!ofmt)
       {
           printf("Call av_guess_format function failed!\n");
           return 0;
       }

       oc = avformat_alloc_context();
       if (!oc)
       {
           printf("Call av_guess_format function failed!\n");
           return 0;
       }
       oc->oformat = ofmt;

       int video_index = -1, audio_index = -1;
       for (unsigned int i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++)
       {
           switch (ic->streams[i]->codec->codec_type)
           {
           case AVMEDIA_TYPE_VIDEO:
               video_index = i;
               ic->streams[i]->discard = AVDISCARD_NONE;
               video_st = add_output_stream(oc, ic->streams[i]);
               break;
           case AVMEDIA_TYPE_AUDIO:
               audio_index = i;
               ic->streams[i]->discard = AVDISCARD_NONE;
               audio_st = add_output_stream(oc, ic->streams[i]);
               break;
           default:
               ic->streams[i]->discard = AVDISCARD_ALL;
               break;
           }
       }
       codec = avcodec_find_decoder(video_st->codec->codec_id);
       if (codec == NULL)
       {
           printf("Call avcodec_find_decoder function failed!\n");
           return 0;
       }

       if (avcodec_open(video_st->codec, codec) < 0)
       {
           printf("Call avcodec_open function failed !\n");
           return 0;
       }

       if (avio_open(&oc->pb, output, AVIO_FLAG_WRITE) < 0)
       {
           return 0;
       }

       if (avformat_write_header(oc, &pAVDictionary))
       {
           printf("Call avformat_write_header function failed.\n");
           return 0;
       }

       //[[++
       AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
       //AVBitStreamFilterContext *absfc = av_bitstream_filter_init("aac_adtstoasc");
       if (!bsfc) {
           fprintf(stderr, "bsf init error!\n");
           return -1;
       }
       //]]++

       int decode_done = 0;
       do
       {
           double segment_time = 0;
           AVPacket packet;

           decode_done = av_read_frame(ic, &packet);
           if (decode_done < 0)
               break;

           if (av_dup_packet(&packet) < 0)
           {
               printf("Call av_dup_packet function failed\n");
               av_free_packet(&packet);
               break;
           }

           //[[**
           if (packet.stream_index == audio_index) {
               segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
               nRet = av_interleaved_write_frame(oc, &packet);  
           } else if (packet.stream_index == video_index) {
               if (packet.flags & AV_PKT_FLAG_KEY) {
                   segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
               } else {
                   segment_time = prev_segment_time;
               }
               //nRet = av_interleaved_write_frame(oc, &packet);  
               write_frame(oc, &packet, video_st->codec, bsfc);
           }
           //]]**

           if (nRet < 0)
           {
               printf("Call av_interleaved_write_frame function failed: %d\n", nRet);
           }
           else if (nRet > 0)
           {
               printf("End of stream requested\n");
               av_free_packet(&packet);
               break;
           }
           av_free_packet(&packet);
           frame_count++;
       }while(!decode_done);

       av_write_trailer(oc);
       printf("frame_count = %d\n", frame_count);

       av_bitstream_filter_close(bsfc);  
       avcodec_close(video_st->codec);
       for(unsigned int k = 0; k < oc->nb_streams; k++)
       {
           av_freep(&oc->streams[k]->codec);
           av_freep(&oc->streams[k]);
       }
       av_free(oc);
       //getchar();
       return 0;
    }

    Compile this code to get an executable named muxts, and then:

    $ ./muxts vid1.mp4 vid1.ts

    No error message was printed, but the audio stream is out of sync and noisy. Checking the .ts file with ffmpeg:

    $ ffmpeg -i vid1.ts
    ffmpeg version 0.8.14-tessus, Copyright (c) 2000-2013 the FFmpeg developers
     built on Jul 29 2013 17:05:18 with llvm_gcc 4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2336.1.00)
     configuration: --prefix=/usr/local --arch=x86_64 --as=yasm --extra-version=tessus --enable-gpl --enable-nonfree --enable-version3 --disable-ffplay --enable-libvorbis --enable-libmp3lame --enable-libx264 --enable-libxvid --enable-bzlib --enable-zlib --enable-postproc --enable-filters --enable-runtime-cpudetect --enable-debug=3 --disable-optimizations
     libavutil    51.  9. 1 / 51.  9. 1
     libavcodec   53.  8. 0 / 53.  8. 0
     libavformat  53.  5. 0 / 53.  5. 0
     libavdevice  53.  1. 1 / 53.  1. 1
     libavfilter   2. 23. 0 /  2. 23. 0
     libswscale    2.  0. 0 /  2.  0. 0
     libpostproc  51.  2. 0 / 51.  2. 0

    Seems stream 0 codec frame rate differs from container frame rate: 180000.00 (180000/1) -> 90000.00 (180000/2)
    Input #0, mpegts, from 'vid1.ts':
     Duration: 00:00:03.75, start: 0.000000, bitrate: 3656 kb/s
     Program 1
       Metadata:
         service_name    : Service01
         service_provider: FFmpeg
       Stream #0.0[0x100]: Video: h264 (Baseline), yuv420p, 640x480, 90k tbr, 90k tbn, 180k tbc
       Stream #0.1[0x101]: Audio: aac, 48000 Hz, mono, s16, 190 kb/s
    At least one output file must be specified

    What should I do?

    Once this issue is fixed, how can I concatenate multiple .ts files into a single .mp4 file?
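
    For the second part, a minimal sketch of one common approach, assuming the ffmpeg build provides the concat protocol and the aac_adtstoasc bitstream filter (the file names are placeholders): convert each .mp4 into a .ts segment, then remux the joined transport streams into a single MP4.

    $ ./muxts vid1.mp4 vid1.ts
    $ ./muxts vid2.mp4 vid2.ts
    $ ffmpeg -i "concat:vid1.ts|vid2.ts" -vcodec copy -acodec copy -absf aac_adtstoasc joined.mp4

    On newer ffmpeg releases the last command is written with -c copy -bsf:a aac_adtstoasc; the bitstream filter is needed because AAC stored in MPEG-TS carries ADTS headers that the MP4 muxer does not accept.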

  • parser not found for codec wmav2

    5 December 2011, by HashCoder

    I am getting a warning when I run the below command.

    Warning: [asf @ 01C787A0] parser not found for codec wmav2, packets or times may be invalid.

    I am using the latest ffmpeg.exe. Did I miss any parameters? Any suggestions, please.

    ffmpeg -i Assets\Logitech_webcam_on_PC.wmv -sameq -f swf -y -an -s 640x360 MySlide.swf
    ffmpeg version N-35295-gb55dd10, Copyright (c) 2000-2011 the FFmpeg developers
     built on Nov 30 2011 00:52:52 with gcc 4.6.2
     configuration: --enable-gpl --enable-version3 --disable-w32threads --enable-runtime-cpudetect --enable-avisynth --enable-bzlib --enable-frei0r --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libfreetype --enable-libgsm --enable-libmp3lame --enable-libopenjpeg --enable-librtmp --enable-libschroedinger --enable-libspeex --enable-libtheora --enable-libvo-aacenc --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libxavs --enable-libxvid --enable-zlib
     libavutil    51. 29. 1 / 51. 29. 1
     libavcodec   53. 39. 1 / 53. 39. 1
     libavformat  53. 22. 0 / 53. 22. 0
     libavdevice  53.  4. 0 / 53.  4. 0
     libavfilter   2. 50. 0 /  2. 50. 0
     libswscale    2.  1. 0 /  2.  1. 0
     libpostproc  51.  2. 0 / 51.  2. 0
    [asf @ 01C787A0] parser not found for codec wmav2, packets or times may be invalid.

    Seems stream 1 codec frame rate differs from container frame rate: 1000.00 (1000/1) -> 0.08 (1/12)
    Input #0, asf, from 'Assets\Logitech_webcam_on_PC.wmv':
     Metadata:
       WMFSDKVersion   : 11.0.5721.5265
       WMFSDKNeeded    : 0.0.0.0000
       IsVBR           : 1
       VBR Peak        : 50500.0000
       Buffer Average  : 66550.0000
     Duration: 00:00:36.22, start: 0.000000, bitrate: 497 kb/s
       Stream #0:0(eng): Audio: wmav2 (a[1][0][0] / 0x0161), 32000 Hz, 1 channels, s16, 20 kb/s
       Stream #0:1(eng): Video: wmv2 (WMV2 / 0x32564D57), yuv420p, 320x180, 422 kb/s, 0.08 tbr, 1k tbn, 1k tbc
    [buffer @ 02AA9760] w:320 h:180 pixfmt:yuv420p tb:1/1000000 sar:0/1 sws_param:
    [scale @ 02AA9A80] w:320 h:180 fmt:yuv420p -> w:640 h:360 fmt:yuv420p flags:0x4
    Output #0, swf, to 'MySlide.swf':
     Metadata:
       WMFSDKVersion   : 11.0.5721.5265
       WMFSDKNeeded    : 0.0.0.0000
       IsVBR           : 1
       VBR Peak        : 50500.0000
       Buffer Average  : 66550.0000
       encoder         : Lavf53.22.0
       Stream #0:0(eng): Video: flv1, yuv420p, 640x360, q=2-31, 200 kb/s, 90k tbn, 0.08 tbc
    Stream mapping:
     Stream #0:1 -> #0:0 (wmv2 -> flv)
    Press [q] to stop, [?] for help
    frame=    4 fps=  0 q=0.0 size=      97kB time=00:00:48.00 bitrate=  16.6kbits/s
    frame=    5 fps=  0 q=0.0 Lsize=     111kB time=00:01:00.00 bitrate=  15.2kbits/s dup=0 drop=599
    video:111kB audio:0kB global headers:0kB muxing overhead 0.128646%
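
    Two hedged observations, offered as guesses rather than a verified fix: since the command already uses -an, the audio stream is discarded, so the wmav2 parser warning should not affect the SWF output; the more suspicious part of the log is the detected video rate of 0.08 tbr (roughly one frame every 12 seconds), which is also where dup=0 drop=599 comes from. One thing to try is forcing the output frame rate explicitly, for example:

    ffmpeg -i Assets\Logitech_webcam_on_PC.wmv -an -r 15 -sameq -s 640x360 -f swf -y MySlide.swf

    Here 15 is an arbitrary rate chosen for illustration; adjust it to whatever the webcam recording actually used.
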
  • PHP FFMPEG not working when I upload mp4 file

    9 August 2013, by Chris

    To convert video to formats compatible with HTML5 video, I wrote the following script:

       $srcFile = "name/of/the/video.mp4";
       $destFile = "/media/video/newfilename";
       $ffmpegPath = "/usr/local/bin/ffmpeg";
       $flvtool2Path = "/usr/local/bin/flvtool2";

       // Create our FFMPEG-PHP class
       $ffmpegObj = new ffmpeg_movie($srcFile);
       // Save our needed variables
       $srcWidth = makeMultipleTwo($ffmpegObj->getFrameWidth());
       $srcHeight = makeMultipleTwo($ffmpegObj->getFrameHeight());
       $srcFPS = $ffmpegObj->getFrameRate();
       $srcAB = intval($ffmpegObj->getAudioBitRate()/1000);
       $srcAR = $ffmpegObj->getAudioSampleRate();
       $srcVB = floor($ffmpegObj->getVideoBitRate()/1000);


       // Call our convert using exec() to convert to the three file types needed by HTML5
       exec($ffmpegPath . " -i ". $srcFile ." -vcodec libx264 -vpre hq -vpre ipod640 -b ".$srcVB."k -bt 100k -acodec libfaac -ab " . $srcAB . "k -ac 2 -s " . $srcWidth . "x" . $srcHeight . " ".$destFile.".mp4");

       exec($ffmpegPath . " -i ". $srcFile ." -vcodec libvpx -r ".$srcFPS." -b ".$srcVB."k -acodec libvorbis -ab " . $srcAB . " -ac 2 -f webm -g 30 -s " . $srcWidth . "x" . $srcHeight . " ".$destFile.".webm");

       exec($ffmpegPath . " -i ". $srcFile ." -vcodec libtheora -r ".$srcFPS." -b ".$srcVB."k -acodec libvorbis -ab " . $srcAB . "k -ac 2 -s " . $srcWidth . "x" . $srcHeight . " ".$destFile.".ogv");

    It is supposed to take any input video type and convert it to mp4, ogv and webm. When I run the script on a .mov file it returns an mp4 and an ogv file, but not webm. When I run it on a .mp4 file it returns no converted files at all. Is there something wrong with the way I am converting the files? Any help?
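
    Not an answer to the conversion settings themselves, but a debugging sketch that usually exposes the cause: exec() as used above discards ffmpeg's error output, so a failing command is silent. Capturing the output (and escaping the shell arguments) shows what ffmpeg actually complains about. The command below mirrors the first exec() call from the question; $srcFile, $destFile and the other variables are the ones already defined there.

       $cmd = $ffmpegPath . " -i " . escapeshellarg($srcFile)
            . " -vcodec libx264 -vpre hq -vpre ipod640 -b " . $srcVB . "k -bt 100k"
            . " -acodec libfaac -ab " . $srcAB . "k -ac 2"
            . " -s " . $srcWidth . "x" . $srcHeight
            . " " . escapeshellarg($destFile . ".mp4") . " 2>&1"; // merge stderr into stdout

       exec($cmd, $ffmpegOutput, $exitCode);                      // capture every output line and the exit code
       if ($exitCode !== 0) {
           // ffmpeg's own message (unknown codec, missing preset file, unreadable input, ...) lands here
           error_log("ffmpeg failed (" . $exitCode . "):\n" . implode("\n", $ffmpegOutput));
       }

    With the output visible, the usual suspects for "nothing happens on .mp4 input" are an encoder or preset this ffmpeg build does not have (libfaac, the -vpre preset files) or a path/permission problem, but that is only a guess until the captured message confirms it.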