
Media (1)
-
Rennes Emotion Map 2010-11
19 October 2011, by
Updated: July 2013
Language: French
Type: Text
Other articles (25)
-
Publishing on MediaSPIP
13 June 2013. Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or higher. If needed, contact your MediaSPIP administrator to find out. -
Support for all media types
10 April 2011. Unlike many modern document-sharing applications and platforms, MediaSPIP aims to handle as many different document formats as possible, whether they are: images (png, gif, jpg, bmp and others...); audio (MP3, Ogg, Wav and others...); video (Avi, MP4, Ogv, mpg, mov, wmv and others...); textual content, code or other (OpenOffice, Microsoft Office (spreadsheet, presentation), web (html, css), LaTeX, Google Earth) (...)
-
Supporting all media types
13 April 2011, by. Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
On other sites (4235)
-
How to fix ffmpeg's official tutorial03 bug where the sound doesn't play well? [on hold]
31 January 2019, by xiaodai. I want to make a player with ffmpeg and SDL. The tutorial I used is this one. Although I have resampled the audio from the decoded stream, the sound still plays with loud noise.
I have no idea how to fix it.
I used the following:
- the latest ffmpeg and SDL1
- Visual Studio 2010
// tutorial03.c
// A pedagogical video player that will stream through every video frame as fast as it can
// and play audio (out of sync).
//
// This tutorial was written by Stephen Dranger (dranger@gmail.com).
//
// Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
// and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
//
// Use the Makefile to build all examples.
//
// Run using
// tutorial03 myvideofile.mpg
//
// to play the stream on your screen.
extern "C"{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
#include "libswresample/swresample.h"
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
}
#ifdef __WIN32__
#undef main /* Prevents SDL from overriding main() */
#endif
#include <stdio.h>
#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000
struct SwrContext *audio_swrCtx;
FILE *pFile=fopen("output.pcm", "wb");
FILE *pFile_stream=fopen("output_stream.pcm","wb");
int audio_len;
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
PacketQueue audioq;
int quit = 0;
void packet_queue_init(PacketQueue *q) {
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0) {
return -1;
}
pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
if(!pkt1) {
return -1;
}
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if(!q->last_pkt) {
q->first_pkt = pkt1;
}
else {
q->last_pkt->next = pkt1;
}
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for(;;) {
if(quit) {
ret = -1;
break;
}
pkt1 = q->first_pkt;
if(pkt1) {
q->first_pkt = pkt1->next;
if(!q->first_pkt) {
q->last_pkt = NULL;
}
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
} else if(!block) {
ret = 0;
break;
} else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
static AVFrame frame;
int len1, data_size = 0;
for(;;) {
while(audio_pkt_size > 0) {
int got_frame = 0;
len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
if(len1 < 0) {
/* if error, skip frame */
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
data_size = 0;
/*
au_convert_ctx = swr_alloc();
au_convert_ctx=swr_alloc_set_opts(au_convert_ctx,out_channel_layout, out_sample_fmt, out_sample_rate,
in_channel_layout,pCodecCtx->sample_fmt , pCodecCtx->sample_rate,0, NULL);
swr_init(au_convert_ctx);
swr_convert(au_convert_ctx,&out_buffer, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)pFrame->data , pFrame->nb_samples);
*/
if( got_frame ) {
audio_swrCtx=swr_alloc();
audio_swrCtx=swr_alloc_set_opts(audio_swrCtx, // we're allocating a new context
AV_CH_LAYOUT_STEREO,//AV_CH_LAYOUT_STEREO, // out_ch_layout
AV_SAMPLE_FMT_S16, // out_sample_fmt
44100, // out_sample_rate
aCodecCtx->channel_layout, // in_ch_layout
aCodecCtx->sample_fmt, // in_sample_fmt
aCodecCtx->sample_rate, // in_sample_rate
0, // log_offset
NULL); // log_ctx
int ret=swr_init(audio_swrCtx);
int out_samples = av_rescale_rnd(swr_get_delay(audio_swrCtx, aCodecCtx->sample_rate) + 1024, 44100, aCodecCtx->sample_rate, AV_ROUND_UP);
ret=swr_convert(audio_swrCtx,&audio_buf, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame.data ,frame.nb_samples);
data_size =
av_samples_get_buffer_size
(
&data_size,
av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO),
ret,
AV_SAMPLE_FMT_S16,
1
);
fwrite(audio_buf, 1, data_size, pFile);
//memcpy(audio_buf, frame.data[0], data_size);
swr_free(&audio_swrCtx);
}
if(data_size <= 0) {
/* No data yet, get more frames */
continue;
}
/* We have data, return it and come back for more later */
return data_size;
}
if(pkt.data) {
av_free_packet(&pkt);
}
if(quit) {
return -1;
}
if(packet_queue_get(&audioq, &pkt, 1) < 0) {
return -1;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
void audio_callback(void *userdata, Uint8 *stream, int len) {
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int /*audio_len,*/ audio_size;
static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
//SDL_memset(stream, 0, len);
while(len > 0) {
if(audio_buf_index >= audio_buf_size) {
/* We have already sent all our data; get more */
audio_size = audio_decode_frame(aCodecCtx, audio_buf, audio_buf_size);
if(audio_size < 0) {
/* If error, output silence */
audio_buf_size = 1024; // arbitrary?
memset(audio_buf, 0, audio_buf_size);
} else {
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
audio_len = audio_buf_size - audio_buf_index;
if(audio_len > len) {
audio_len = len;
}
memcpy(stream, (uint8_t *)audio_buf , audio_len);
//SDL_MixAudio(stream,(uint8_t*)audio_buf,audio_len,SDL_MIX_MAXVOLUME);
fwrite(audio_buf, 1, audio_len, pFile_stream);
len -= audio_len;
stream += audio_len;
audio_buf_index += audio_len;
audio_len=len;
}
}
int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx = NULL;
int i, videoStream, audioStream;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVPacket packet;
int frameFinished;
//float aspect_ratio;
AVCodecContext *aCodecCtx = NULL;
AVCodec *aCodec = NULL;
SDL_Overlay *bmp = NULL;
SDL_Surface *screen = NULL;
SDL_Rect rect;
SDL_Event event;
SDL_AudioSpec wanted_spec, spec;
struct SwsContext *sws_ctx = NULL;
AVDictionary *videoOptionsDict = NULL;
AVDictionary *audioOptionsDict = NULL;
if(argc < 2) {
fprintf(stderr, "Usage: test <file>\n");
exit(1);
}
// Register all formats and codecs
av_register_all();
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
// Open video file
if(avformat_open_input(&pFormatCtx, argv[1]/*"file.mov"*/, NULL, NULL) != 0) {
return -1; // Couldn't open file
}
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
return -1; // Couldn't find stream information
}
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
videoStream = -1;
audioStream = -1;
for(i = 0; i < pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
videoStream < 0) {
videoStream = i;
}
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
audioStream < 0) {
audioStream = i;
}
}
if(videoStream == -1) {
return -1; // Didn't find a video stream
}
if(audioStream == -1) {
return -1;
}
aCodecCtx = pFormatCtx->streams[audioStream]->codec;
// Set audio settings from codec info
wanted_spec.freq = 44100;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
wanted_spec.silence = 0;
wanted_spec.samples = 1024;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx;
if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
return -1;
}
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
if(!aCodec) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);
// audio_st = pFormatCtx->streams[index]
packet_queue_init(&audioq);
SDL_PauseAudio(0);
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec == NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict) < 0) {
return -1; // Could not open codec
}
// Allocate video frame
pFrame = av_frame_alloc();
// Make a screen to put our video
#ifndef __DARWIN__
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
if(!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
// Allocate a place to put our YUV image on that screen
bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
pCodecCtx->height,
SDL_YV12_OVERLAY,
screen);
sws_ctx =
sws_getContext
(
pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Read frames and save first five frames to disk
i = 0;
while(av_read_frame(pFormatCtx, &packet) >= 0) {
// Is this a packet from the video stream?
if(packet.stream_index == videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
&packet);
// Did we get a video frame?
if(frameFinished) {
SDL_LockYUVOverlay(bmp);
AVPicture pict;
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];
pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];
// Convert the image into YUV format that SDL uses
sws_scale
(
sws_ctx,
(uint8_t const * const *)pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
pict.data,
pict.linesize
);
SDL_UnlockYUVOverlay(bmp);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;
SDL_DisplayYUVOverlay(bmp, &rect);
SDL_Delay(40);
av_free_packet(&packet);
}
} else if(packet.stream_index == audioStream) {
packet_queue_put(&audioq, &packet);
} else {
av_free_packet(&packet);
}
// Free the packet that was allocated by av_read_frame
SDL_PollEvent(&event);
switch(event.type) {
case SDL_QUIT:
quit = 1;
SDL_Quit();
exit(0);
break;
default:
break;
}
}
// Free the YUV frame
av_free(pFrame);
/*swr_free(&audio_swrCtx);*/
// Close the codec
avcodec_close(pCodecCtx);
fclose(pFile);
fclose(pFile_stream);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;
}
I hope it will play normally.
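For comparison, here is a minimal sketch of the resampling step modeled on FFmpeg's resampling_audio example rather than a verified fix for the code above; it reuses the question's aCodecCtx, frame and audio_buf names and assumes stereo S16 output at 44100 Hz. The main differences are that the destination size is expressed in samples per channel (not bytes) and the byte count comes from the samples actually converted:
/* Resampling sketch (illustrative, per FFmpeg's resampling_audio example) */
struct SwrContext *swr = swr_alloc_set_opts(NULL,
    AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,   /* output */
    aCodecCtx->channel_layout, aCodecCtx->sample_fmt,
    aCodecCtx->sample_rate, 0, NULL);                /* input */
swr_init(swr);
/* Destination capacity in samples per channel, including samples
 * still buffered inside the resampler. */
int out_samples = av_rescale_rnd(
    swr_get_delay(swr, aCodecCtx->sample_rate) + frame.nb_samples,
    44100, aCodecCtx->sample_rate, AV_ROUND_UP);
int converted = swr_convert(swr, &audio_buf, out_samples,
    (const uint8_t **)frame.data, frame.nb_samples);
/* Bytes of converted audio: based on samples actually written. */
int data_size = av_samples_get_buffer_size(NULL, 2, converted,
    AV_SAMPLE_FMT_S16, 1);
Note that the context is allocated and initialized once and reused for every frame, instead of being created and freed inside the decode loop.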
-
How to add delay between input and output with ffmpeg
12 February 2019, by ManYouTroll. I would like to add a delay so that the output starts, for example, 1 minute after the input is received, with this command:
ffmpeg -i rtmp://xx1.xx1.xx1.xx1/live -f flv rtmp://xx2.xx2.xx2.xx2/delay/pass
That is, a 1-minute shift between the two video streams.
I hope you can help me, thank you in advance.
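One commonly suggested workaround, sketched here rather than guaranteed (it assumes the relay host can buffer to disk, and buffer.flv plus the 60-second sleep are illustrative), is to record the incoming stream to a file and start a second ffmpeg after the desired delay:
ffmpeg -i rtmp://xx1.xx1.xx1.xx1/live -c copy -f flv buffer.flv &
sleep 60
ffmpeg -re -i buffer.flv -c copy -f flv rtmp://xx2.xx2.xx2.xx2/delay/pass
The -re flag makes the second ffmpeg read the buffer file at native frame rate, so it keeps trailing the live recording instead of catching up with it.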
-
Access to macroblocks and motion values with libx264
6 March 2019, by Enock. I have a question and I hope someone can help me.
I want to extract all the motion vectors of P-type macroblocks in a P-frame while encoding
an H.264 video with libx264 (x264). libx264 uses X264Context in the function x264_frame to encode
video. So I want to know how I can access the macroblocks in a picture and
the motion vector table for that picture through the X264Context struct.
With MpegEncContext we can access them through that struct’s mb_type, *mvs[2], mb_width, mb_height and mb_stride fields. But I don’t know how to get the same information (mb_type, *mvs[2], mb_width, mb_height and mb_stride) with the X264Context struct. Please, I need your help.
Regards
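x264 has no public API for per-macroblock data, so extracting it during encoding generally means patching the encoder's internals. A different technique, sketched below under that caveat, is to decode the encoded H.264 stream with FFmpeg and ask it to export motion vectors; AV_FRAME_DATA_MOTION_VECTORS and the AVMotionVector fields used here are part of libavutil, while dump_mvs is just an illustrative helper name:
/* Sketch: read motion vectors from a decoded H.264 stream with
 * FFmpeg instead of instrumenting libx264 internals. */
#include <libavcodec/avcodec.h>
#include <libavutil/motion_vector.h>
#include <stdio.h>
static void dump_mvs(AVFrame *frame)
{
    AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
    if (!sd)
        return; /* intra-only frame, or MV export not enabled */
    const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
    size_t nb = sd->size / sizeof(*mvs);
    for (size_t i = 0; i < nb; i++)
        printf("src=%d block=%dx%d (%d,%d) -> (%d,%d)\n",
               mvs[i].source, mvs[i].w, mvs[i].h,
               mvs[i].src_x, mvs[i].src_y,
               mvs[i].dst_x, mvs[i].dst_y);
}
/* When opening the decoder, request MV export:
 *   AVDictionary *opts = NULL;
 *   av_dict_set(&opts, "flags2", "+export_mvs", 0);
 *   avcodec_open2(dec, codec, &opts);
 */
Each AVMotionVector entry carries the block size (w, h) and the source and destination coordinates, which covers the per-macroblock motion information the question asks about, though only after encoding rather than during it.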