
Recherche avancée
Autres articles (60)
-
Configurer la prise en compte des langues
15 novembre 2010, parAccéder à la configuration et ajouter des langues prises en compte
Afin de configurer la prise en compte de nouvelles langues, il est nécessaire de se rendre dans la partie "Administrer" du site.
De là, dans le menu de navigation, vous pouvez accéder à une partie "Gestion des langues" permettant d’activer la prise en compte de nouvelles langues.
Chaque nouvelle langue ajoutée reste désactivable tant qu’aucun objet n’est créé dans cette langue. Dans ce cas, elle devient grisée dans la configuration et (...) -
XMP PHP
13 mai 2011, parDixit Wikipedia, XMP signifie :
Extensible Metadata Platform ou XMP est un format de métadonnées basé sur XML utilisé dans les applications PDF, de photographie et de graphisme. Il a été lancé par Adobe Systems en avril 2001 en étant intégré à la version 5.0 d’Adobe Acrobat.
Étant basé sur XML, il gère un ensemble de tags dynamiques pour l’utilisation dans le cadre du Web sémantique.
XMP permet d’enregistrer sous forme d’un document XML des informations relatives à un fichier : titre, auteur, historique (...) -
Les tâches Cron régulières de la ferme
1er décembre 2010, parLa gestion de la ferme passe par l’exécution à intervalle régulier de plusieurs tâches répétitives dites Cron.
Le super Cron (gestion_mutu_super_cron)
Cette tâche, planifiée chaque minute, a pour simple effet d’appeler le Cron de l’ensemble des instances de la mutualisation régulièrement. Couplée avec un Cron système sur le site central de la mutualisation, cela permet de simplement générer des visites régulières sur les différents sites et éviter que les tâches des sites peu visités soient trop (...)
Sur d’autres sites (4081)
-
ffmpeg record video plays too fast
29 avril 2023, par Kris Xia — I'm a college student and I am studying FFmpeg now.



I have written a program that can record the desktop and audio ('virtual-audio-capturer') with FFmpeg. I am now working on audio and video synchronization.
I met some problems that video recording plays too fast.



When I look for audio and video synchronization help on the Internet,I find a formula for calculating PTS :



pts = n * ((1 / timbase)/ fps)



When I use this formula,I find a phenomenon.



1. The higher the frame rate is, the faster the video plays back.



2.The slower the frame rate, the faster the video playback.



Also I find while the framerate is 10,the video playback speed will be right.



Why has this situation happened ?



I have thought this question for three days. I really hope someone can help me solve this problem.



I really appreciate the help.



#include "stdafx.h"

#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"

#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libavutil/mem.h"
#include "libavutil/frame.h"
#include "libavfilter/avfilter.h"
#include "libswresample/swresample.h"

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")

#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif

AVFormatContext *pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Out = NULL;

AVCodecContext *outVideoCodecCtx = NULL;
AVCodecContext *outAudioCodecCtx = NULL;

AVStream *pVideoStream = NULL, *pAudioStream = NULL;

AVCodec *outAVCodec;
AVCodec *outAudioCodec;

AVCodecContext *pCodecCtx_Video;
AVCodec *pCodec_Video;
AVFifoBuffer *fifo_video = NULL;
AVAudioFifo *fifo_audio = NULL;
int VideoIndex, AudioIndex;
int codec_id;

CRITICAL_SECTION AudioSection, VideoSection;



SwsContext *img_convert_ctx;
int frame_size = 0;

uint8_t *picture_buf = NULL, *frame_buf = NULL;

bool bCap = true;

DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam );
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam );

int OpenVideoCapture()
{
 AVInputFormat *ifmt=av_find_input_format("gdigrab");
 AVDictionary *options = NULL;
 av_dict_set(&options, "framerate", "60", NULL);
 if(avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options)!=0)
 {
 printf("Couldn't open input stream.(无法打开视频输入流)\n");
 return -1;
 }
 if(avformat_find_stream_info(pFormatCtx_Video,NULL)<0)
 {
 printf("Couldn't find stream information.(无法获取视频流信息)\n");
 return -1;
 }
 if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
 {
 printf("Couldn't find video stream information.(无法获取视频流信息)\n");
 return -1;
 }
 pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
 pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
 if(pCodec_Video == NULL)
 {
 printf("Codec not found.(没有找到解码器)\n");
 return -1;
 }
 if(avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
 {
 printf("Could not open codec.(无法打开解码器)\n");
 return -1;
 }

 av_dump_format(pFormatCtx_Video, 0, NULL, 0);

 img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt, 
 pCodecCtx_Video->width, pCodecCtx_Video->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

 frame_size = avpicture_get_size(pCodecCtx_Video->pix_fmt, pCodecCtx_Video->width, pCodecCtx_Video->height);
 fifo_video = av_fifo_alloc(30 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));

 return 0;
}

static char *dup_wchar_to_utf8(wchar_t *w)
{
    // Convert a wide (UTF-16) string to a freshly av_malloc()ed UTF-8 copy.
    // The caller owns the returned buffer and must release it with av_free().
    // Returns NULL on conversion failure or allocation failure.
    char *s = NULL;
    int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
    if (l <= 0)
        return NULL; // WideCharToMultiByte failed; av_malloc(0) would be meaningless
    s = (char *) av_malloc(l);
    if (s)
        WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
    return s;
}

int OpenAudioCapture()
{
 AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
 char * psDevName = dup_wchar_to_utf8(L"audio=virtual-audio-capturer");

 if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt,NULL) < 0)
 {
 printf("Couldn't open input stream.(无法打开音频输入流)\n");
 return -1;
 }

 if(avformat_find_stream_info(pFormatCtx_Audio,NULL)<0) 
 return -1; 

 if(pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
 {
 printf("Couldn't find video stream information.(无法获取音频流信息)\n");
 return -1;
 }

 AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
 if(0 > avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL))
 {
 printf("can not find or open audio decoder!\n");
 }

 av_dump_format(pFormatCtx_Audio, 0, NULL, 0);

 return 0;
}

int OpenOutPut()
{
 AVStream *pVideoStream = NULL, *pAudioStream = NULL;
 const char *outFileName = "test.mp4";
 avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);

 if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
 {
 VideoIndex = 0;
 pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);
 if (!pVideoStream)
 {
 printf("can not new stream for output!\n");
 return -1;
 }

 outVideoCodecCtx = avcodec_alloc_context3(outAVCodec);
 if ( !outVideoCodecCtx )
 {
 printf("Error : avcodec_alloc_context3()\n");
 return -1;
 }

 //set codec context param
 outVideoCodecCtx = pVideoStream->codec;
 outVideoCodecCtx->codec_id = AV_CODEC_ID_MPEG4;
 outVideoCodecCtx->width = pFormatCtx_Video->streams[0]->codec->width;
 outVideoCodecCtx->height = pFormatCtx_Video->streams[0]->codec->height;
 outVideoCodecCtx->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
 outVideoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
 outVideoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;

 if (codec_id == AV_CODEC_ID_H264)
 {
 av_opt_set(outVideoCodecCtx->priv_data, "preset", "slow", 0);
 }

 outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
 if( !outAVCodec )
 {
 printf("\n\nError : avcodec_find_encoder()");
 return -1;
 }
 if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
 outVideoCodecCtx->flags |=CODEC_FLAG_GLOBAL_HEADER;

 if ((avcodec_open2(outVideoCodecCtx,outAVCodec, NULL)) < 0)
 {
 printf("can not open the encoder\n");
 return -1;
 }
 }

 if(pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 {
 AVCodecContext *pOutputCodecCtx;
 AudioIndex = 1;
 pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);

 pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);

 pOutputCodecCtx = pAudioStream->codec;

 pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
 pOutputCodecCtx->channel_layout = pFormatCtx_Out->streams[0]->codec->channel_layout;
 pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
 if(pOutputCodecCtx->channel_layout == 0)
 {
 pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
 pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);

 }
 pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
 AVRational time_base={1, pAudioStream->codec->sample_rate};
 pAudioStream->time_base = time_base;
 //audioCodecCtx->time_base = time_base;

 pOutputCodecCtx->codec_tag = 0; 
 if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER) 
 pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

 if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
 {
 printf("编码器打开失败,退出程序\n");
 return -1;
 }
 }

 if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
 {
 if(avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
 {
 printf("can not open output file handle!\n");
 return -1;
 }
 }

 if(avformat_write_header(pFormatCtx_Out, NULL) < 0)
 {
 printf("can not write the header of the output file!\n");
 return -1;
 }

 return 0;
}

int _tmain(int argc, _TCHAR* argv[])
{
 av_register_all();
 avdevice_register_all();
 if (OpenVideoCapture() < 0)
 {
 return -1;
 }
 if (OpenAudioCapture() < 0)
 {
 return -1;
 }
 if (OpenOutPut() < 0)
 {
 return -1;
 }
// int fps;
 /*printf("输入帧率:");
 scanf_s("%d",&fps);
 if ( NULL == fps)
 {
 fps = 10;
 }*/

 InitializeCriticalSection(&VideoSection);
 InitializeCriticalSection(&AudioSection);

 AVFrame *picture = av_frame_alloc();
 int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
 pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
 picture_buf = new uint8_t[size];

 avpicture_fill((AVPicture *)picture, picture_buf, 
 pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
 pFormatCtx_Out->streams[VideoIndex]->codec->width, 
 pFormatCtx_Out->streams[VideoIndex]->codec->height);



 //star cap screen thread
 CreateThread( NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
 //star cap audio thread
 CreateThread( NULL, 0, AudioCapThreadProc, 0, 0, NULL);
 int64_t cur_pts_v=0,cur_pts_a=0;
 int VideoFrameIndex = 0, AudioFrameIndex = 0;

 while(1)
 {
 if (_kbhit() != 0 && bCap)
 {
 bCap = false;
 Sleep(2000);
 }
 if (fifo_audio && fifo_video)
 {
 int sizeAudio = av_audio_fifo_size(fifo_audio);
 int sizeVideo = av_fifo_size(fifo_video);
 //缓存数据写完就结束循环
 if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && 
 av_fifo_size(fifo_video) <= frame_size && !bCap)
 {
 break;
 }
 }

 if(av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base, 
 cur_pts_a,pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
 {
 if (av_fifo_size(fifo_video) < frame_size && !bCap)
 {
 cur_pts_v = 0x7fffffffffffffff;
 }
 if(av_fifo_size(fifo_video) >= size)
 {
 EnterCriticalSection(&VideoSection);
 av_fifo_generic_read(fifo_video, picture_buf, size, NULL); //将数据从avfifobuffer馈送到用户提供的回调。
 LeaveCriticalSection(&VideoSection);

 avpicture_fill((AVPicture *)picture, picture_buf,
 pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
 pFormatCtx_Out->streams[VideoIndex]->codec->width,
 pFormatCtx_Out->streams[VideoIndex]->codec->height); //根据指定的图像参数和提供的图像数据缓冲区设置图片字段。

 //pts = n * ((1 / timbase)/ fps);
 //picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 24);
 picture->pts = VideoFrameIndex * ((outVideoCodecCtx->time_base.den * 100000 / outVideoCodecCtx->time_base.num) / 180);

 int got_picture = 0;
 AVPacket pkt;
 av_init_packet(&pkt);

 pkt.data = NULL;
 pkt.size = 0;
 //从帧中获取输入的原始视频数据
 int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
 if(ret < 0)
 {
 continue;
 }

 if (got_picture==1)
 {
 pkt.stream_index = VideoIndex;
 /*int count = 1;
 pkt.pts = pkt.dts = count * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);
 count++;*/

 //x = pts * (timebase1.num / timebase1.den )* (timebase2.den / timebase2.num);

 pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base, 
 pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 
 pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base, 
 pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 


 pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 60);
 //pkt.duration = 1000/60;
 //pkt.pts = pkt.dts = Count * (ofmt_ctx->streams[stream_index]->time_base.den) /ofmt_ctx->streams[stream_index]->time_base.num / 10;

 //Count++;


 cur_pts_v = pkt.pts;

 ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
 //delete[] pkt.data;
 av_free_packet(&pkt);
 }
 VideoFrameIndex++;
 }
 }
 else
 {
 if (NULL == fifo_audio)
 {
 continue;//还未初始化fifo
 }
 if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
 {
 cur_pts_a = 0x7fffffffffffffff;
 }
 if(av_audio_fifo_size(fifo_audio) >= 
 (pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
 {
 AVFrame *frame;
 frame = av_frame_alloc();
 frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size>0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size: 1024;
 frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
 frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
 frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
 av_frame_get_buffer(frame, 0);

 EnterCriticalSection(&AudioSection);
 av_audio_fifo_read(fifo_audio, (void **)frame->data, 
 (pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
 LeaveCriticalSection(&AudioSection);

 AVPacket pkt_out;
 av_init_packet(&pkt_out);
 int got_picture = -1;
 pkt_out.data = NULL;
 pkt_out.size = 0;

 frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
 if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
 {
 printf("can not decoder a frame");
 }
 av_frame_free(&frame);
 if (got_picture) 
 {
 pkt_out.stream_index = AudioIndex;
 pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
 pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
 pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;

 cur_pts_a = pkt_out.pts;

 int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
 av_free_packet(&pkt_out);
 }
 AudioFrameIndex++;
 }
 }
 }

 delete[] picture_buf;

 av_fifo_free(fifo_video);
 av_audio_fifo_free(fifo_audio);

 av_write_trailer(pFormatCtx_Out);

 avio_close(pFormatCtx_Out->pb);
 avformat_free_context(pFormatCtx_Out);

 if (pFormatCtx_Video != NULL)
 {
 avformat_close_input(&pFormatCtx_Video);
 pFormatCtx_Video = NULL;
 }
 if (pFormatCtx_Audio != NULL)
 {
 avformat_close_input(&pFormatCtx_Audio);
 pFormatCtx_Audio = NULL;
 }

 return 0;
}

DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam )
{
    // Capture loop: read frames from the gdigrab device, decode, convert to
    // YUV420P and push the three planes into the shared video FIFO.
    AVPacket packet;
    int got_picture;
    AVFrame *pFrame = av_frame_alloc();

    AVFrame *picture = av_frame_alloc();
    int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
        pFormatCtx_Out->streams[VideoIndex]->codec->width,
        pFormatCtx_Out->streams[VideoIndex]->codec->height);

    // BUG FIX: use a private conversion buffer.  The original reused the
    // global picture_buf, which the main thread encodes from concurrently,
    // so sws_scale could overwrite a frame while it was being encoded (the
    // critical sections only guard the FIFO, not the buffer).
    uint8_t *conv_buf = new uint8_t[size];
    avpicture_fill((AVPicture *)picture, conv_buf,
        pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
        pFormatCtx_Out->streams[VideoIndex]->codec->width,
        pFormatCtx_Out->streams[VideoIndex]->codec->height);

    // BUG FIX: removed the leftover debug FILE* ("proc_test.yuv") that was
    // opened but never written to or closed.

    av_init_packet(&packet);
    int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
    int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
    int y_size = height * width;
    while (bCap)
    {
        packet.data = NULL;
        packet.size = 0;
        if (av_read_frame(pFormatCtx_Video, &packet) < 0)
        {
            continue;
        }
        if (packet.stream_index == 0)
        {
            if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
            {
                printf("Decode Error.(解码错误)\n");
                continue;
            }
            if (got_picture)
            {
                sws_scale(img_convert_ctx,
                    (const uint8_t* const*)pFrame->data,
                    pFrame->linesize,
                    0,
                    pFormatCtx_Out->streams[VideoIndex]->codec->height,
                    picture->data,
                    picture->linesize);

                if (av_fifo_space(fifo_video) >= size)
                {
                    // write Y, U, V planes back-to-back (YUV420P layout)
                    EnterCriticalSection(&VideoSection);
                    av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
                    av_fifo_generic_write(fifo_video, picture->data[1], y_size / 4, NULL);
                    av_fifo_generic_write(fifo_video, picture->data[2], y_size / 4, NULL);
                    LeaveCriticalSection(&VideoSection);
                }
            }
        }
        av_free_packet(&packet);
    }
    av_frame_free(&pFrame);
    av_frame_free(&picture);
    delete[] conv_buf;
    return 0;
}

DWORD WINAPI AudioCapThreadProc( LPVOID lpParam )
{
    // Audio capture loop: read packets from the dshow device, decode them and
    // append the samples to the shared audio FIFO (allocated lazily once the
    // first decoded frame reveals nb_samples).
    AVPacket pkt;
    AVFrame *frame = av_frame_alloc();
    int gotframe;
    av_init_packet(&pkt); // BUG FIX: pkt fields other than data/size were uninitialized
    while (bCap)
    {
        pkt.data = NULL;
        pkt.size = 0;
        if (av_read_frame(pFormatCtx_Audio, &pkt) < 0)
        {
            continue;
        }

        if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
        {
            av_free_packet(&pkt); // BUG FIX: packet was leaked on this error path
            av_frame_free(&frame);
            printf("can not decoder a frame");
            break;
        }
        av_free_packet(&pkt);

        if (!gotframe)
        {
            printf("没有获取到数据,继续下一次");
            continue;
        }

        if (NULL == fifo_audio)
        {
            // 30 frames of headroom, sized from the first decoded frame
            fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
                pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
        }

        if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
        {
            EnterCriticalSection(&AudioSection);
            av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
            LeaveCriticalSection(&AudioSection);
        }
    }
    av_frame_free(&frame); // safe if already freed above: av_frame_free NULLs the pointer
    return 0;
}




Maybe there is another way to calculate PTS and DTS



I hope that whatever the frame rate is, the video playback speed is right — not too fast and not too slow.


-
libx264 encoder video plays too fast
23 avril 2014, par Nick — I’m trying to write a program that uses libx264 to encode the video frames. I’ve wrapped this code into a small class (see below). I have frames that are in YUV420 format. libx264 encodes the frames and I save them to a file. I can play the file back in VLC, all of the frames are there, but it plays back at several hundred times the actual frame rate. Currently I am capturing frames at 2.5 FPS, but they play back as if it was recorded at 250 or more FPS. I’ve tried to change the frame rate with no luck.
I’ve also tried to set
_param.b_vfr_input = 1
and then set the time bases appropriately, but that causes my program to crash. Any ideas ? My encode code is shown below. I’ve also included the output of ffprobe -show_frames
Wrapper Class :
x264wrapper::x264wrapper(int width, int height, int fps, int timeBaseNum, int timeBaseDen, int vfr)
{
    // Configure libx264 for low-latency encoding of I420 frames and open the
    // encoder, the input picture, and the raw .h264 output file.
    x264_param_default_preset(&_param, "veryfast", "zerolatency");
    _param.i_threads = 1;
    _param.i_width = width;
    _param.i_height = height;
    _param.i_fps_num = fps;
    _param.i_fps_den = 1;
    // Intra refresh: at most one keyframe interval per second of video.
    _param.i_keyint_max = fps;
    _param.b_intra_refresh = 1;
    // Rate control: CRF with an upper quality bound.
    _param.rc.i_rc_method = X264_RC_CRF;
    _param.rc.f_rf_constant = 25;
    _param.rc.f_rf_constant_max = 35;
    // Streaming-friendly output: repeated SPS/PPS headers, Annex-B start codes.
    _param.b_repeat_headers = 1;
    _param.b_annexb = 1;
    // Timing configuration supplied by the caller.
    _param.b_vfr_input = vfr;
    _param.i_timebase_num = timeBaseNum;
    _param.i_timebase_den = timeBaseDen;
    _param.i_log_level = X264_LOG_DEBUG;
    _encoder = x264_encoder_open(&_param);
    cout << "Timebase " << _param.i_timebase_num << "/" << _param.i_timebase_den << endl;
    cout << "fps " << _param.i_fps_num << "/" << _param.i_fps_den << endl;
    // How many timebase ticks one frame occupies at the configured FPS.
    _ticks_per_frame = (int64_t)_param.i_timebase_den * _param.i_fps_den / _param.i_timebase_num / _param.i_fps_num;
    cout << "ticks_per_frame " << _ticks_per_frame << endl;
    int alloc_rc = x264_picture_alloc(&_pic_in, X264_CSP_I420, width, height);
    if (alloc_rc != 0)
    {
        cout << "Failed to allocate picture" << endl;
        throw(1);
    }
    _ofs = new ofstream("output.h264", ofstream::out | ofstream::binary);
    _pts = 0;
}
x264wrapper::~x264wrapper(void)
{
    // Flush and release the output stream.
    _ofs->close();
    delete _ofs; // BUG FIX: was leaked — allocated with new in the constructor
    _ofs = NULL;
}
void x264wrapper::encode(uint8_t * buf)
{
// Encode one frame: convert the camera buffer to I420, advance the PTS by
// one fixed frame duration, run the encoder, and append any produced NAL
// units to the output file.
x264_nal_t* nals;
int i_nals;
convertFromBalserToX264(buf);
// Constant-frame-rate timestamping: each frame advances the PTS by
// _ticks_per_frame (computed from the timebase and FPS in the constructor).
_pts += _ticks_per_frame;
_pic_in.i_pts = _pts;
x264_picture_t pic_out;
int frame_size = x264_encoder_encode(_encoder, &nals, &i_nals, &_pic_in, &pic_out);
if (frame_size >= 0)
{
// NOTE(review): only nals[0].p_payload is written; this relies on x264
// laying out all returned NALs contiguously from the first payload — confirm.
_ofs->write((char*)nals[0].p_payload, frame_size);
}
else
{
cout << "error: x264_encoder_encode failed" << endl;
}
}Output of ffprobe -show_frames :
[FRAME]
media_type=video
key_frame=1
pkt_pts=N/A
pkt_pts_time=N/A
pkt_dts=N/A
pkt_dts_time=N/A
pkt_duration=48000
pkt_duration_time=0.040000
pkt_pos=0
width=1920
height=1080
pix_fmt=yuv420p
sample_aspect_ratio=N/A
pict_type=I
coded_picture_number=0
display_picture_number=0
interlaced_frame=0
top_field_first=0
repeat_pict=0
reference=0
[/FRAME]
[FRAME]
media_type=video
key_frame=0
pkt_pts=N/A
pkt_pts_time=N/A
pkt_dts=N/A
pkt_dts_time=N/A
pkt_duration=N/A
pkt_duration_time=N/A
pkt_pos=54947
width=1920
height=1080
pix_fmt=yuv420p
sample_aspect_ratio=N/A
pict_type=P
coded_picture_number=1
display_picture_number=0
interlaced_frame=0
top_field_first=0
repeat_pict=0
reference=0
[/FRAME]
[FRAME]
media_type=video
key_frame=0
pkt_pts=N/A
pkt_pts_time=N/A
pkt_dts=N/A
pkt_dts_time=N/A
pkt_duration=N/A
pkt_duration_time=N/A
pkt_pos=57899
width=1920
height=1080
pix_fmt=yuv420p
sample_aspect_ratio=N/A
pict_type=P
coded_picture_number=2
display_picture_number=0
interlaced_frame=0
top_field_first=0
repeat_pict=0
reference=0
[/FRAME] -
Fast video compression like Whatsapp
5 août 2015, par Douglas Anunciação — I need to speed up video compression in my Android app. I’m using FFMPEG and it takes 3 minutes to compress an 80 MB video. Does anyone know a better solution ?
The command I’m using is :
/data/data/com.moymer/app_bin/ffmpeg -y -i /storage/emulated/0/DCIM/Camera/VID_20150803_164811363.mp4 -s 640x352 -r 25 -vcodec mpeg4 -ac 1 -preset ultrafast -strict -2 /storage/emulated/0/DCIM/Camera/compressed_video.mp4
I’m running this command using FFMPEG for Android from this github repo : https://github.com/guardianproject/android-ffmpeg-java
The code to use FFMPEG in my project is inside an AsyncTask and is copied below :
/**
 * Background step of the compression AsyncTask: probes the input video with
 * ffmpeg, builds an output Clip (640x352, 24 fps, 512 kbps, mono) with a
 * rotation-correcting transpose filter, runs the compression, and returns
 * the resulting ItemRoloDeCamera (its sdcardPath is only set on success).
 */
@Override
protected Object doInBackground(Object... params) {
ItemRoloDeCamera compressedVideo = new ItemRoloDeCamera();
File videoInputFile = new File(video.getSdcardPath());
File videoFolderFile = videoInputFile.getParentFile();
File videoOutputFile = new File(videoFolderFile, "video_comprimido_moymer.mp4");
// Diagnostic logging of the input/output file state before starting.
if (videoFolderFile.exists())
android.util.Log.e("COMPRESS VIDEO UTILS", "video folder exist");
else
android.util.Log.e("COMPRESS VIDEO UTILS", "video folder DON'T exist");
if (videoInputFile.exists())
android.util.Log.e("COMPRESS VIDEO UTILS", "video input file exist");
else
android.util.Log.e("COMPRESS VIDEO UTILS", "video input file DON'T exist");
if (videoOutputFile.exists())
android.util.Log.e("COMPRESS VIDEO UTILS", "video output file exist");
else
android.util.Log.e("COMPRESS VIDEO UTILS", "video output file DON'T exist");
FfmpegController ffmpegController;
try {
ffmpegController = new FfmpegController(context, videoFolderFile);
Clip clipIn = new Clip(videoInputFile.getAbsolutePath());
// Collect ffmpeg's info output into videoInfo (consumed by getRotateMetadata).
ffmpegController.getInfo(clipIn, new ShellUtils.ShellCallback() {
@Override
public void shellOut(String shellLine) {
videoInfo.add(shellLine);
}
@Override
public void processComplete(int exitValue) {
videoInfo.add(String.valueOf(exitValue));
}
});
int rotate = getRotateMetadata();
// Output settings: fixed 640x352 @ 24 fps, 512 kbps, mono audio.
Clip clipOut = new Clip(videoOutputFile.getAbsolutePath());
clipOut.videoFps = "24";
clipOut.videoBitrate = 512;
clipOut.audioChannels = 1;
clipOut.width = 640;
clipOut.height = 352;
// Apply one 90-degree transpose per quarter turn of source rotation.
if (rotate == 90)
clipOut.videoFilter = "transpose=1";
else if (rotate == 180)
clipOut.videoFilter = "transpose=1,transpose=1";
else if (rotate == 270)
clipOut.videoFilter = "transpose=1,transpose=1,transpose=1";
millisDuration = getVideoDuration(videoInputFile.getAbsolutePath());
float secondsDuration = millisDuration / 1000f;
clipOut.duration = secondsDuration;
// Run the compression; progress is parsed from ffmpeg's shell output.
ffmpegController.processVideo(clipIn, clipOut, true, new ShellUtils.ShellCallback() {
@Override
public void shellOut(String shellLine) {
android.util.Log.e("COMPRESS VIDEO UTILS", "shellOut - " + shellLine);
float percentage = getTimeMetadata(shellLine);
if (percentage >= 0f)
publishProgress(percentage);
}
@Override
public void processComplete(int exitValue) {
android.util.Log.e("COMPRESS VIDEO UTILS", "proccess complete - " + exitValue);
}
});
} catch (IOException e) {
e.printStackTrace();
} catch (Exception e) {
e.printStackTrace();
} finally {
// NOTE(review): returning from a finally block swallows any pending
// exception from the try block — confirm this is intentional.
if (videoOutputFile.exists()) {
android.util.Log.e("COMPRESS VIDEO UTILS", "finished ffmpeg ---> video output file exist");
compressedVideo.setSdcardPath(videoOutputFile.getAbsolutePath());
return compressedVideo;
} else
android.util.Log.e("COMPRESS VIDEO UTILS", "finished ffmpeg ---> video output file DON'T exist");
}
return compressedVideo;
}
// Parses an ffmpeg progress line containing "time=HH:MM:SS" and converts it
// into a completion percentage relative to millisDuration.
// Returns -1 when the line carries no time information.
private float getTimeMetadata(String shellLine) {
float percentage = -1;
if (shellLine.contains("time=")) {
// NOTE(review): assumes "time" is always the 5th '='-separated field of the
// progress line — fragile against ffmpeg output-format changes; confirm.
String[] timeLine = shellLine.split("=");
String time = timeLine[5];
time = time.replace("bitrate", "");
time = time.trim();
// String source = "00:10:17";
String[] tokens = time.split(":");
int secondsToMs = (int) (Float.parseFloat(tokens[2]) * 1000);
int minutesToMs = Integer.parseInt(tokens[1]) * 60000;
int hoursToMs = Integer.parseInt(tokens[0]) * 3600000;
long timeInMillis = secondsToMs + minutesToMs + hoursToMs;
percentage = (timeInMillis * 100.0f) / millisDuration;
}
return percentage;
}
// Scans the ffmpeg info lines collected in videoInfo for a rotation entry
// such as "rotate : 270" and returns the rotation in degrees (0 if absent).
// If several lines match, the last one wins.
private int getRotateMetadata() {
int rotate = 0;
// (removed unused local "durationString")
for (String shellLine : videoInfo) {
if (shellLine.contains("rotate")) {
//rotate : 270
String[] rotateLine = shellLine.split(":");
rotate = Integer.parseInt(rotateLine[1].trim());
}
}
return rotate;
}
// Returns the duration in milliseconds of the video at videoPath, read via
// Android's MediaMetadataRetriever.
// NOTE(review): the retriever is never released, and Long.parseLong will
// throw if the duration metadata is missing — confirm callers tolerate this.
public static long getVideoDuration(String videoPath) {
MediaMetadataRetriever retriever = new MediaMetadataRetriever();
retriever.setDataSource(videoPath);
String time = retriever
.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION);
long timeInmillisec = Long.parseLong(time);
return timeInmillisec;
}The only change I made in the processVideo method was to add the following lines when building the commmand :
cmd.add("-preset");
cmd.add("ultrafast");