
Other articles (96)

  • Using and configuring the script

    19 January 2011

    Information specific to the Debian distribution
    If you use this distribution, you will need to enable the "debian-multimedia" repositories as explained here:
    Since version 0.3.1 of the script, the repository can be enabled automatically after a prompt.
    Retrieving the script
    The installation script can be retrieved in two different ways.
    Via svn, using this command to fetch the up-to-date source code:
    svn co (...)

  • Installation in farm mode

    4 February 2011

    Farm mode makes it possible to host several MediaSPIP-type sites while installing their functional core only once.
    This is the method we use on this very platform.
    Using farm mode requires some familiarity with SPIP's mechanics, unlike the standalone version, which does not really require any specific knowledge, since the usual SPIP private area is no longer used.
    First of all, you must have installed the same files as the installation (...)

  • The plugin: Podcasts.

    14 July 2010

    The podcasting problem is, once again, one that reveals how data transport is standardized on the Internet.
    Two interesting formats exist: the one developed by Apple, strongly geared toward iTunes, whose spec is here; and the "Media RSS Module" format, which is more "free" and notably backed by Yahoo and the Miro software.
    File types supported in the feeds
    Apple's format only allows the following formats in its feeds: .mp3 audio/mpeg .m4a audio/x-m4a .mp4 (...)

On other sites (3434)

  • Why, when using ffmpeg to create an AVI video file from images in real time, does the AVI file play with purple noisy color?

    30 June 2015, by Brubaker Haim

    This is my Ffmpeg class, which I wrote some time ago:

    using System;
    using System.Windows.Forms;
    using System.Collections.Generic;
    using System.Linq;
    using System.Text;
    using System.Threading.Tasks;
    using System.Drawing;
    using System.IO.Pipes;
    using System.Runtime.InteropServices;
    using System.Diagnostics;
    using System.IO;
    using DannyGeneral;

    namespace Manager
    {
       class Ffmpeg
       {
           NamedPipeServerStream p;
           String pipename = "mytestpipe";
           System.Diagnostics.Process process;
           string ffmpegFileName = "ffmpeg.exe";
           string workingDirectory;

           public Ffmpeg()
           {
               workingDirectory = Path.GetDirectoryName(Application.ExecutablePath);
               Logger.Write("workingDirectory: " + workingDirectory);
               if (!Directory.Exists(workingDirectory))
               {
                   Directory.CreateDirectory(workingDirectory);
               }
               ffmpegFileName = Path.Combine(workingDirectory, ffmpegFileName);
               Logger.Write("FfmpegFilename: " + ffmpegFileName);
           }

           public void Start(string pathFileName, int BitmapRate)
           {
               try
               {

                   string outPath = pathFileName;
                   p = new NamedPipeServerStream(pipename, PipeDirection.Out, 1, PipeTransmissionMode.Byte);

                   ProcessStartInfo psi = new ProcessStartInfo();
                   psi.WindowStyle = ProcessWindowStyle.Hidden;
                   psi.UseShellExecute = false;
                   psi.CreateNoWindow = false;
                   psi.FileName = ffmpegFileName;
                   psi.WorkingDirectory = workingDirectory;
                   psi.Arguments = @"-f rawvideo -pix_fmt yuv420p -video_size 1920x1080 -i \\.\pipe\mytestpipe -map 0 -c:v mpeg4 -r " + BitmapRate + " " + outPath;
                psi.RedirectStandardError = true; // must be set before Process.Start, or it has no effect (the stream should then also be drained, or ffmpeg can block once the buffer fills)
                process = Process.Start(psi);
                process.EnableRaisingEvents = false;
                   p.WaitForConnection();
               }
               catch (Exception err)
               {
                   Logger.Write("Exception Error: " + err.ToString());
               }
           }

           public void PushFrame(Bitmap bmp)
           {
               try
               {
                   int length;
                   // Lock the bitmap's bits.
                   //bmp = new Bitmap(1920, 1080);
                   Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
                   //Rectangle rect = new Rectangle(0, 0, 1280, 720);
                   System.Drawing.Imaging.BitmapData bmpData =
                       bmp.LockBits(rect, System.Drawing.Imaging.ImageLockMode.ReadOnly,
                       bmp.PixelFormat);

                   int absStride = Math.Abs(bmpData.Stride);
                   // Get the address of the first line.
                   IntPtr ptr = bmpData.Scan0;

                   // Declare an array to hold the bytes of the bitmap.
                   //length = 3 * bmp.Width * bmp.Height;
                   length = absStride * bmpData.Height;
                   byte[] rgbValues = new byte[length];

                   //Marshal.Copy(ptr, rgbValues, 0, length);
                    // Copy row by row. Note that j == bmp.Height - i - 1 throughout this
                    // loop, so each row lands back at its own index and no vertical flip occurs.
                    int j = bmp.Height - 1;
                    for (int i = 0; i < bmp.Height; i++)
                    {
                        IntPtr pointer = new IntPtr(bmpData.Scan0.ToInt64() + (bmpData.Stride * j)); // ToInt64 is safe in 64-bit processes; ToInt32 can overflow
                        System.Runtime.InteropServices.Marshal.Copy(pointer, rgbValues, absStride * (bmp.Height - i - 1), absStride);
                        j--;
                    }
                   p.Write(rgbValues, 0, length);
                   bmp.UnlockBits(bmpData);
               }
               catch(Exception err)
               {
                   Logger.Write("Error: " + err.ToString());
               }

           }

           public void Close()
           {
               p.Close();
           }
       }
    }

    I am using it in Form1, in a button click event:

    private void button1_Click(object sender, EventArgs e)
           {
               timer1.Start();
           }

    The screenshots directory is where I take a screenshot every 100 ms, in the timer1 tick event:

       ScreenShot shot = new ScreenShot();
       public static int counter = 0;
       private void timer1_Tick(object sender, EventArgs e)
       {
           counter++;
           shot.GetScreenShot(@"e:\screenshots\", "screenshot");
           if (counter == 1200)
           {
               timer1.Stop();
           }
       }
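
    One timing detail worth flagging: a 100 ms timer captures roughly 10 frames per second, while ffmpeg is told the pipe carries 25 frames per second, so even with correct colors the video would play back faster than real time. A minimal adjustment, offered as a sketch and assuming the 25 fps rate is kept:

     timer1.Interval = 1000 / 25; // 40 ms between captures to match a 25 fps pipe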

    I call the PushFrame method from inside the ScreenShot class, where I save the screenshots.

    Ffmpeg fmpeg;

    Then:

    fmpeg = new Ffmpeg();
    fmpeg.Start(@"e:\screenshots\test.avi", 25);

    And:

    public Bitmap GetScreenShot(string folder, string name)
       {
           _screenShot = new Bitmap(GetScreen());
           System.GC.Collect();
           System.GC.WaitForPendingFinalizers();
           string ingName = folder + name + Elgato_Video_Capture.counter.ToString("D6") + ".bmp";
           _screenShot.Save(ingName);
           fmpeg.PushFrame(_screenShot);
           _screenShot.Dispose();

           // Warning: the bitmap was just disposed, so the caller receives an unusable
           // object; either return it before disposing or change the return type to void.
           return _screenShot;
       }

    All the images on the hard disk are fine: I can open and edit them without any problems, and they are all the same size.

    The end result is one big AVI file, 1.08 GB in size.
    But when I play it, I see many windows running inside it very fast, all painted with noisy purple color.

    Here is a screenshot taken while playing the video file:

    [screenshot of the AVI file playing]

    I think the problem is somewhere in the Ffmpeg class, where I pass the parameters to ffmpeg.exe:

    psi.Arguments = @"-f rawvideo -pix_fmt yuv420p -video_size 1920x1080 -i \\.\pipe\mytestpipe -map 0 -c:v mpeg4 -r " + BitmapRate + " " + outPath;

    I am not sure what makes the AVI file look like that.
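
    A plausible cause, offered as a hypothesis rather than a confirmed diagnosis: the screenshots are standard GDI+ bitmaps (32 bits per pixel, BGRA byte order in memory), yet the arguments declare the pipe as yuv420p raw video. A 1920x1080 yuv420p frame is about 3.1 MB, while each pushed bitmap is about 8.3 MB, so ffmpeg slices every bitmap into several bogus "frames" - which would explain both the purple chroma garbage and the multiple fast-moving windows. A sketch of corrected arguments, assuming the bitmaps really are Format32bppArgb:

     // Sketch: declare the pipe's real pixel format and let ffmpeg convert to yuv420p itself.
     psi.Arguments = @"-f rawvideo -pix_fmt bgra -video_size 1920x1080 -framerate "
         + BitmapRate + @" -i \\.\pipe\mytestpipe -c:v mpeg4 -pix_fmt yuv420p -r "
         + BitmapRate + " " + outPath;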

    This is the resulting video file I got: https://www.youtube.com/watch?v=fdxPus-Xv1k&feature=youtu.be

  • How to properly close an FFmpeg stream and AVFormatContext without leaking memory?

    13 December 2019, by Darkwonder

    I have built an app that uses FFmpeg to connect to remote IP cameras in order to receive video and audio frames via RTSP 2.0.

    The app is built using Xcode 10-11 and Objective-C with a custom FFmpeg build config.

    The architecture is the following:

    MyApp
      Document_0
        RTSPContainerObject_0
          RTSPObject_0
        RTSPContainerObject_1
          RTSPObject_1
        ...
      Document_1
      ...

    GOAL:

    1. After closing Document_0, no FFmpeg objects should be leaked.
    2. The closing process should stop frame reading and destroy all objects that use FFmpeg.

    PROBLEM:


    1. Somehow Xcode’s memory debugger shows two instances of MyApp.

    FACTS:

    • macOS's Activity Monitor doesn't show two instances of MyApp.

    • macOS's Activity Monitor doesn't show any instances of FFmpeg or other child processes.

    • The issue is not related to leftover memory from a late memory snapshot, since it can be reproduced easily.

    • Xcode's memory debugger shows that the second instance has only RTSPObject's AVFormatContext and no other objects.

      1. The second instance has an AVFormatContext, and the RTSPObject still holds a pointer to that AVFormatContext.

    FACTS:

    • Opening and closing the second document, Document_1, leads to the same problem, leaving two more objects leaked. This means there is a bug that scales: more and more memory is used and becomes unavailable.

    Here is my termination code:

     - (void)terminate
     {
        // * Video and audio frame provisioning termination *
        [self stopVideoStream];
        [self stopAudioStream];
        // *

        // * Video codec termination *
        avcodec_free_context(&_videoCodecContext); // NULL pointer safe.
        self.videoCodecContext = NULL;
        // *

        // * Audio codec termination *
        avcodec_free_context(&_audioCodecContext); // NULL pointer safe.
        self.audioCodecContext = NULL;
        // *

        if (self.packet)
        {
            // Free the packet that was allocated by av_read_frame.
            av_packet_unref(_packet); // Takes an AVPacket *; the documentation doesn't mention NULL safety.
            self.packet = NULL;
        }

        if (self.currentAudioPacket)
        {
            av_packet_unref(_currentAudioPacket);
            self.currentAudioPacket = NULL;
        }

        // Free raw frame data.
        av_freep(&_rawFrameData); // NULL pointer safe.

        // Free the swscaler context swsContext.
        self.isFrameConversionContextAllocated = NO;
        sws_freeContext(scallingContext); // NULL pointer safe.

        [self.audioPacketQueue removeAllObjects];
        self.audioPacketQueue = nil;
        self.audioPacketQueueLock = nil;
        self.packetQueueLock = nil;
        self.audioStream = nil;
        BXLogInDomain(kLogDomainSources, kLogLevelVerbose, @"%s:%d: All streams have been terminated!", __FUNCTION__, __LINE__);

        // * Session context termination *
        AVFormatContext *pFormatCtx = self.sessionContext;
        BOOL shouldProceedWithInputSessionTermination = self.isInputStreamOpen && self.shouldTerminateStreams && pFormatCtx;
        NSLog(@"\nTerminating session context...");
        if (shouldProceedWithInputSessionTermination)
        {
            NSLog(@"\nTerminating...");
            //av_write_trailer(pFormatCtx);
            // Discard all internally buffered data.
            avformat_flush(pFormatCtx); // The documentation doesn't mention NULL safety.
            // Close an opened input AVFormatContext and free it and all its contents.
            // WARNING: Closing a non-opened stream will cause avformat_close_input to crash.
            avformat_close_input(&pFormatCtx); // Sets pFormatCtx to NULL on return.
            NSLog(@"Logging leftovers - %p, %p  %p", self.sessionContext, _sessionContext, pFormatCtx);
            avformat_free_context(pFormatCtx); // pFormatCtx is NULL at this point, so this call is a no-op.

            NSLog(@"Logging content = %c", *self.sessionContext); // WARNING: dereferences memory that was already freed via the stale property.
            //avformat_free_context(pFormatCtx); - Not needed because avformat_close_input is closing it.
            self.sessionContext = NULL;
        }
        // *
     }
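
    For comparison, the bare teardown order the current FFmpeg C API expects is short; here is a minimal sketch with placeholder variable names, not the app's actual code:

     // Minimal demuxer/decoder teardown, in dependency order.
     avcodec_free_context(&video_codec_ctx); // frees the decoder context and NULLs the pointer
     avcodec_free_context(&audio_codec_ctx);
     av_packet_free(&packet);                // unrefs and frees a heap-allocated AVPacket
     av_frame_free(&frame);                  // same for the decoded AVFrame
     sws_freeContext(sws_ctx);               // NULL-safe
     avformat_close_input(&format_ctx);      // closes the input, frees the context, NULLs the pointer
     // No avformat_free_context() afterwards: avformat_close_input already freed it.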

    IMPORTANT: the termination sequence is:

       New frame will be read.
    -[(RTSPObject)StreamInput currentVideoFrameDurationSec]
    -[(RTSPObject)StreamInput frameDuration:]
    -[(RTSPObject)StreamInput currentCGImageRef]
    -[(RTSPObject)StreamInput convertRawFrameToRGB]
    -[(RTSPObject)StreamInput pixelBufferFromImage:]
    -[(RTSPObject)StreamInput cleanup]
    -[(RTSPObject)StreamInput dealloc]
    -[(RTSPObject)StreamInput stopVideoStream]
    -[(RTSPObject)StreamInput stopAudioStream]

    Terminating session context...
    Terminating...
    Logging leftovers - 0x109ec6400, 0x109ec6400  0x109ec6400
    Logging content = \330
    -[Document dealloc]

    SOLUTIONS THAT DID NOT WORK:

    • Changing the order of object releases (freeing the AVFormatContext first didn't lead to any change).
    • Calling RTSPObject's cleanup method much sooner, to give FFmpeg more time to handle object releases.
    • Reading a lot of SO answers and FFmpeg documentation to find a clean cleanup process, or newer code that might highlight why the objects are not released properly.

    I am currently reading the documentation on AVFormatContext, since I believe I am forgetting to release something. This belief is based on the memory debugger's output showing that the AVFormatContext is still around.

    Here is my creation code:

    #pragma mark # Helpers - Start

    - (NSError *)openInputStreamWithVideoStreamId:(int)videoStreamId
                                   audioStreamId:(int)audioStreamId
                                        useFirst:(BOOL)useFirstStreamAvailable
                                          inInit:(BOOL)isInitProcess
    {
       // NSLog(@"%s", __PRETTY_FUNCTION__); // RTSP
       self.status = StreamProvisioningStatusStarting;
       AVCodec *decoderCodec;
       NSString *rtspURL = self.streamURL;
       NSString *errorMessage = nil;
       NSError *error = nil;

       self.sessionContext = NULL;
       self.sessionContext = avformat_alloc_context();

       AVFormatContext *pFormatCtx = self.sessionContext;
       if (!pFormatCtx)
       {
           // Create approp error.
           return error;
       }


       // MUST be called before avformat_open_input().
       av_dict_free(&_sessionOptions);

        self.sessionOptions = 0;
        if (self.usesTcp)
        {
            // "rtsp_transport" - Set RTSP transport protocols.
            // Allowed are: udp_multicast, tcp, udp, http.
            av_dict_set(&_sessionOptions, "rtsp_transport", "tcp", 0);
        }
        av_dict_set(&_sessionOptions, "rtsp_transport", "tcp", 0); // Note: this unconditional set makes the usesTcp check above redundant.

       // Open an input stream and read the header with the demuxer options.
       // WARNING: The stream must be closed with avformat_close_input()
       if (avformat_open_input(&pFormatCtx, rtspURL.UTF8String, NULL, &_sessionOptions) != 0)
       {
           // WARNING: Note that a user-supplied AVFormatContext (pFormatCtx) will be freed on failure.
           self.isInputStreamOpen = NO;
           // Create approp error.
           return error;
       }

       self.isInputStreamOpen = YES;

       // user-supplied AVFormatContext pFormatCtx might have been modified.
       self.sessionContext = pFormatCtx;

       // Retrieve stream information.
       if (avformat_find_stream_info(pFormatCtx,NULL) < 0)
       {
           // Create approp error.
           return error;
       }

       // Find the first video stream
       int streamCount = pFormatCtx->nb_streams;

       if (streamCount == 0)
       {
           // Create approp error.
           return error;
       }

       int noStreamsAvailable = pFormatCtx->streams == NULL;

       if (noStreamsAvailable)
       {
           // Create approp error.
           return error;
       }

       // Result. An Index can change, an identifier shouldn't.
       self.selectedVideoStreamId = STREAM_NOT_FOUND;
       self.selectedAudioStreamId = STREAM_NOT_FOUND;

       // Fallback.
       int firstVideoStreamIndex = STREAM_NOT_FOUND;
       int firstAudioStreamIndex = STREAM_NOT_FOUND;

       self.selectedVideoStreamIndex = STREAM_NOT_FOUND;
       self.selectedAudioStreamIndex = STREAM_NOT_FOUND;

       for (int i = 0; i < streamCount; i++)
       {
           // Looking for video streams.
           AVStream *stream = pFormatCtx->streams[i];
           if (!stream) { continue; }
           AVCodecParameters *codecPar = stream->codecpar;
           if (!codecPar) { continue; }

           if (codecPar->codec_type==AVMEDIA_TYPE_VIDEO)
           {
               if (stream->id == videoStreamId)
               {
                   self.selectedVideoStreamId = videoStreamId;
                   self.selectedVideoStreamIndex = i;
               }

               if (firstVideoStreamIndex == STREAM_NOT_FOUND)
               {
                   firstVideoStreamIndex = i;
               }
           }
           // Looking for audio streams.
           if (codecPar->codec_type==AVMEDIA_TYPE_AUDIO)
           {
               if (stream->id == audioStreamId)
               {
                   self.selectedAudioStreamId = audioStreamId;
                   self.selectedAudioStreamIndex = i;
               }

               if (firstAudioStreamIndex == STREAM_NOT_FOUND)
               {
                   firstAudioStreamIndex = i;
               }
           }
       }

       // Use first video and audio stream available (if possible).

       if (self.selectedVideoStreamIndex == STREAM_NOT_FOUND && useFirstStreamAvailable && firstVideoStreamIndex != STREAM_NOT_FOUND)
       {
           self.selectedVideoStreamIndex = firstVideoStreamIndex;
           self.selectedVideoStreamId = pFormatCtx->streams[firstVideoStreamIndex]->id;
       }

       if (self.selectedAudioStreamIndex == STREAM_NOT_FOUND && useFirstStreamAvailable && firstAudioStreamIndex != STREAM_NOT_FOUND)
       {
           self.selectedAudioStreamIndex = firstAudioStreamIndex;
           self.selectedAudioStreamId = pFormatCtx->streams[firstAudioStreamIndex]->id;
       }

       if (self.selectedVideoStreamIndex == STREAM_NOT_FOUND)
       {
           // Create approp error.
           return error;
       }

       // See AVCodecID for codec listing.

       // * Video codec setup:
        // 1. Find the decoder for the video stream with the given codec id.
       AVStream *stream = pFormatCtx->streams[self.selectedVideoStreamIndex];
       if (!stream)
       {
           // Create approp error.
           return error;
       }
       AVCodecParameters *codecPar = stream->codecpar;
       if (!codecPar)
       {
           // Create approp error.
           return error;
       }

       decoderCodec = avcodec_find_decoder(codecPar->codec_id);
       if (decoderCodec == NULL)
       {
           // Create approp error.
           return error;
       }

       // Get a pointer to the codec context for the video stream.
       // WARNING: The resulting AVCodecContext should be freed with avcodec_free_context().
       // Replaced:
       // self.videoCodecContext = pFormatCtx->streams[self.selectedVideoStreamIndex]->codec;
       // With:
       self.videoCodecContext = avcodec_alloc_context3(decoderCodec);
       avcodec_parameters_to_context(self.videoCodecContext,
                                     codecPar);

       self.videoCodecContext->thread_count = 4;
       NSString *description = [NSString stringWithUTF8String:decoderCodec->long_name];

       // 2. Open codec.
       if (avcodec_open2(self.videoCodecContext, decoderCodec, NULL) < 0)
       {
           // Create approp error.
           return error;
       }

       // * Audio codec setup:
       if (self.selectedAudioStreamIndex > -1)
       {
           [self setupAudioDecoder];
       }

       // Allocate a raw video frame data structure. Contains audio and video data.
       self.rawFrameData = av_frame_alloc();

       self.outputWidth = self.videoCodecContext->width;
       self.outputHeight = self.videoCodecContext->height;

       if (!isInitProcess)
       {
            // Triggering notifications during the init process won't change the UI, since the object is created locally. All
            // objects which need data access to this object will not be able to get it. That's why we don't notify anyone about the changes.
           [NSNotificationCenter.defaultCenter postNotificationName:NSNotification.rtspVideoStreamSelectionChanged
                                                             object:nil userInfo: self.selectedVideoStream];

           [NSNotificationCenter.defaultCenter postNotificationName:NSNotification.rtspAudioStreamSelectionChanged
                                                             object:nil userInfo: self.selectedAudioStream];
       }

       return nil;
    }

    UPDATE 1

    The initial architecture allowed using any given thread, and most of the code below would run on the main thread. This was not appropriate, since opening the stream input can take several seconds, during which the main thread is blocked while waiting for a network response inside FFmpeg. To solve this issue I implemented the following solution:

    • Creation and the initial setup are only allowed on the background_thread (see code snippet "1" below).
    • Changes are allowed on the current_thread(Any).
    • Termination is allowed on the current_thread(Any).

    After removing the main-thread checks and the dispatch_asyncs to background threads, the leaking stopped and I can't reproduce the issue anymore:

    // Code that produces the issue.
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        // 1 - Create and do initial setup.
        // This block creates the issue.
        self.rtspObject = [[RTSPObject alloc] initWithURL: ... ];
        [self.rtspObject openInputStreamWithVideoStreamId: ...
                                            audioStreamId: ...
                                                 useFirst: ...
                                                   inInit: ...];
    });

    I still don't understand why Xcode's memory debugger says that this block is retained.
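
    One common explanation, offered here as a hypothesis: a dispatch block captures self strongly, and the block (plus everything it captures) stays alive until the queue releases it, which can keep a second path to the RTSPObject's AVFormatContext visible in the memory graph. The usual weak-strong capture avoids extending the owner's lifetime; a sketch under that assumption, keeping the elided arguments as placeholders:

     __weak typeof(self) weakSelf = self;
     dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
         typeof(weakSelf) strongSelf = weakSelf;
         if (!strongSelf) { return; } // owner already deallocated; skip the setup
         strongSelf.rtspObject = [[RTSPObject alloc] initWithURL: ... ];
         [strongSelf.rtspObject openInputStreamWithVideoStreamId: ...
                                                   audioStreamId: ...
                                                        useFirst: ...
                                                          inInit: ...];
     });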

    Any advice or idea is welcome.

  • Yet another screenshot encoding exercise with ffmpeg - stuck at getting an AVFrame from ATL::CImage - VC++

    11 September 2013, by sith

    Total AV newbie here - trying to learn the ropes of using FFmpeg functions to encode movies. While searching for tutorials I found a few similar questions, which I have linked here for reference:

    Encoding a screenshot into a video using FFMPEG

    [Libav-user] Encoding a screenshot into a video using FFMPEG

    Save bitmap to video (libavcodec ffmpeg)

    When converting from RGB to YUV using ffmpeg the video file the color is spread why?

    How to convert RGB from YUV420p for ffmpeg encoder?

    Encode bmp sequence with libavcodec... Help!

    Not able to encode image with ffmpeg

    For my setup, FFmpeg is on VS12 - VC++ with MFC on Win7.

    With the help of the above samples, I am able to get "some" output from the encoder, but I am not sure in what format or state the output has been encoded. Neither VLC nor WMP can play this file; they do not even seem to recognize the metadata in the file to display the FPS or video length. What would normally cause that? Also, any pointers on what could be going wrong and how to approach fixing the problems would be great.

    Here is the flow of my code:

    Step 1: capture the desktop onto a CImage:

     int W=GetSystemMetrics(SM_CXSCREEN), H=GetSystemMetrics(SM_CYSCREEN), bpp=24;
     CImage cImg; cImg.Create(W,H,bpp);
     HDC hDC = cImg.GetDC();
     CWindowDC winDC(GetDesktopWindow());

     BitBlt(hDC, 0, 0, W, H, winDC.m_hDC, 0, 0, SRCCOPY); // W and H here; "rez.W(), rez.H()" was undefined in this snippet

    At this point I am able to dump a screenshot into a BMP file,
    using cImg.Save( _T("test.bmp"), Gdiplus::ImageFormatBMP);

    Step 2: extract the BMP bits from the CImage.

    HBITMAP hBitmap = (HBITMAP)cImg;
    HDC memDC = CreateCompatibleDC(NULL);
    SelectObject( memDC, hBitmap );

    BITMAPINFO bmi; // initialized bmi with {W,-H, plane=1, bitCount=24, comp=BI_RGB, size=W*H*3 }
    << removed bmi init code for conciseness. >>>

    BYTE *rgb24Data = new BYTE[W*H*3]; // 3 for 24bpp. 4 for 32...
    int ret = GetDIBits(memDC, hBitmap, 0, H, rgb24Data, &bmi, DIB_RGB_COLORS);

    At this point I faithfully believe rgb24Data points to the pixel data :) copied out of the cImg bitmap.

    Step 3: next I try to create an AVFrame with the rgb24Data obtained from this CImage. This is also where I have a massive knowledge gap; I am going to try to recover.

    // setup the codecs and contexts here as per mohM's post

    AVCodec *currCodec = avcodec_find_encoder(CODEC_ID_MPEG4);

     AVCodecContext *codeCtxt = avcodec_alloc_context();  // init this with bit_rate=400k, W, H,
     << removed codeCtxt init code for conciseness. >>>   //  time_base 1/25, gop_size=10, max_b_frames=1, pix_fmt=YUV420P

    avcodec_open(codeCtxt, currCodec);

    SwsContext *currSWSCtxt = sws_getContext( W, H, AV_PIX_FMT_RGB24, // FROM
                                             W, H, AV_PIX_FMT_YUV420P, // TO
                                             SWS_FAST_BILINEAR,
                                             NULL, NULL, NULL);

    // allocate and fill AVFrame
    int numBytes = avpicture_get_size(PIX_FMT_YUV420P, W, H);
    uint8_t *buffer=new uint8_t[numBytes];
    AVFrame *avFrame = avcodec_alloc_frame();
    avpicture_fill( (AVPicture*)avFrame, buffer, PIX_FMT_YUV420P, W, H );

    Step 4: convert the RGB data to YUV420P as we fill the frame.

    uint8_t * inData[1] = { rgb24Data };
    int inLinesize[1] = { 3*W }; // RGB stride
    sws_scale( currSWSCtxt, inData, inLinesize, 0, H,
              avFrame->data, avFrame->linesize);
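
    One stride pitfall worth flagging here, as an observation rather than something the linked posts state: GetDIBits writes each row padded to a 4-byte (DWORD) boundary, so 3*W is only correct when it happens to be a multiple of 4 (it is for W=1920); for other widths the input line size should be rounded up:

     int inLinesize[1] = { (3 * W + 3) & ~3 }; // DWORD-aligned stride, as produced by GetDIBits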

    Step 5: encode the frame and write the output buffer into a file.

    int out_size = avcodec_encode_video( codeCtxt,
                                        outBuf,
                                        outBufSize,
                                        avFrame );

    fwrite(outBuf, 1, outBufSize, outFile );

    Finally, I close the file off with [0x00 0x00 0x01 0xb7].

    The first hint of things gone haywire is that 50 screens of 1920x1080 at 24bpp, encoded at 25fps, give me a 507 MB unplayable MPEG file.

    As mentioned earlier, neither VLC nor WMP can play this file, nor do they even recognize its metadata to display the FPS or video length.
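
    Two things seem worth checking, offered as hypotheses rather than certain fixes. First, each write should emit out_size bytes (what the encoder actually produced), not outBufSize (the whole scratch buffer); writing the full buffer on every frame pads the stream with garbage and would easily account for the 507 MB. Second, this flow produces a raw MPEG-4 elementary stream rather than a real container (AVI, MP4), and players read FPS and duration from container metadata, so the stream needs to be muxed (via libavformat or the ffmpeg CLI) before that information can appear. A sketch of the corrected write loop under these assumptions:

     // Encode one frame and write only the bytes the encoder produced.
     int out_size = avcodec_encode_video(codeCtxt, outBuf, outBufSize, avFrame);
     if (out_size > 0)
     {
         fwrite(outBuf, 1, out_size, outFile); // out_size, not outBufSize
     }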

    Any guidance is much appreciated.