
Recherche avancée
Médias (2)
-
Granite de l’Aber Ildut
9 septembre 2011, par
Mis à jour : Septembre 2011
Langue : français
Type : Texte
-
Géodiversité
9 septembre 2011, par ,
Mis à jour : Août 2018
Langue : français
Type : Texte
Autres articles (95)
-
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, par
Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.
-
Ecrire une actualité
21 juin 2013, par
Présentez les changements dans votre MédiaSPIP ou les actualités de vos projets sur votre MédiaSPIP grâce à la rubrique actualités.
Dans le thème par défaut spipeo de MédiaSPIP, les actualités sont affichées en bas de la page principale sous les éditoriaux.
Vous pouvez personnaliser le formulaire de création d’une actualité.
Formulaire de création d’une actualité Dans le cas d’un document de type actualité, les champs proposés par défaut sont : Date de publication ( personnaliser la date de publication ) (...) -
Publier sur MédiaSpip
13 juin 2013
Puis-je poster des contenus à partir d’une tablette iPad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir
Sur d’autres sites (4900)
-
FFMPEG merging sounds with amix cause volume problem [duplicate]
10 mars 2020, par birdcage
I am trying to combine many sounds with
FFMPEG
amix
command and I need to set the starting moment of every sound. Every sound has a different duration. When I use the command below, the volume of the first and last item seems different in the merged file. It seems the level of sound increases till the end of the merged sound. I am wondering what I do wrong.
ffmpeg -i 1.wav -i 2.wav -i 3.wav -i 4.wav -filter_complex
"[0]adelay=1000|1000[a];
[1]adelay=30000|30000[b];
[2]adelay=50000|50000[c];
[3]adelay=200000|200000[d];
[a][b][c][d]amix=4"
/Users/username/Desktop/final.wav -
Switch to Matomo for WordPress from Google Analytics
-
record rtsp stream to file(muxing)
11 avril 2014, par user3521863
AVFormatContext *g_oc = NULL;
/* Input (demuxer) streams — presumably filled in when the RTSP source is
 * opened elsewhere in the file; NOTE(review): confirm against player.c. */
AVStream *g_in_audio_st, *g_in_video_st;
/* Output (muxer) streams, created by init_audio_codec()/init_video_codec(). */
AVStream *g_out_audio_st, *g_out_video_st;
/* Hand-maintained timestamp counters used to rewrite pts/dts on write. */
int audio_pts = 0, video_pts = 0, audio_dts = 0, video_dts = 0;
int last_video_pts = 0;
/* outpkt: reusable output packet; av_pkt: alias of the packet being written. */
AVPacket outpkt, *av_pkt;
/*
 * Initialize the output video stream.
 *
 * Creates a new video stream on the global output context g_oc and copies
 * the codec context of input stream #1 into it, then overrides the timing
 * and size parameters. NOTE(review): the input stream index 1 is hard-coded
 * as "video" — confirm the RTSP source always exposes streams in that order.
 *
 * context: demuxer context of the RTSP input (read-only here).
 * On failure the function logs and returns, leaving g_out_video_st NULL
 * or partially initialized.
 */
static void init_video_codec(AVFormatContext *context) {
    LOGI(1, "enter init_video_codec");
    int fps;
    /* Derive fps from the input's real frame rate. r_frame_rate holds two
     * plain ints, so guard on a usable numerator/denominator; the original
     * compared the int numerator against the 64-bit AV_NOPTS_VALUE
     * sentinel, which is a meaningless test. */
    if (context->streams[1]->r_frame_rate.num > 0 &&
        context->streams[1]->r_frame_rate.den > 0)
        fps = context->streams[1]->r_frame_rate.num / context->streams[1]->r_frame_rate.den;
    else
        fps = 25; /* common fallback when the rate is unknown */
    g_out_video_st = avformat_new_stream(g_oc, context->streams[1]->codec->codec);
    LOGI(1, "video avformat_new_stream");
    if (g_out_video_st == NULL) {
        LOGE(1, "Fail to Allocate Output Video Stream");
        return;
    }
    LOGI(1, "Allocated Video Stream");
    if (avcodec_copy_context(g_out_video_st->codec, context->streams[1]->codec) != 0) {
        LOGE(1, "Failed to video Copy Context");
        return;
    }
    LOGI(1, "Success to video Copy Context");
    /* Override the copied context with the input stream's display and
     * timing parameters. */
    g_out_video_st->sample_aspect_ratio.den = g_in_video_st->codec->sample_aspect_ratio.den;
    g_out_video_st->sample_aspect_ratio.num = g_in_video_st->codec->sample_aspect_ratio.num;
    g_out_video_st->codec->codec_id = g_in_video_st->codec->codec->id;
    g_out_video_st->codec->time_base.num = 1;
    g_out_video_st->codec->time_base.den = fps * (g_in_video_st->codec->ticks_per_frame);
    /* Stream time base is fixed at 1/1000 (millisecond ticks); the pts
     * values written in write_video_stream() must match this base. */
    g_out_video_st->time_base.num = 1;
    g_out_video_st->time_base.den = 1000;
    g_out_video_st->r_frame_rate.num = fps;
    g_out_video_st->r_frame_rate.den = 1;
    g_out_video_st->avg_frame_rate.den = 1;
    g_out_video_st->avg_frame_rate.num = fps;
    /* g_frame_width/g_frame_height are defined elsewhere in player.c. */
    g_out_video_st->codec->width = g_frame_width;
    g_out_video_st->codec->height = g_frame_height;
    g_out_video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    LOGI(1, "end video init");
}
/*
 * Initialize the output audio stream.
 *
 * Creates a new audio stream on the global output context g_oc and copies
 * the codec context of input stream #0 into it. NOTE(review): the input
 * stream index 0 is hard-coded as "audio" — confirm the stream order.
 *
 * context: demuxer context of the RTSP input (read-only here).
 */
static void init_audio_codec(AVFormatContext *context) {
    LOGI(1, "enter init_audio_codec");
    g_out_audio_st = avformat_new_stream(g_oc, context->streams[0]->codec->codec);
    LOGI(1, "audio avformat_new_stream");
    /* BUG FIX: the original never checked the avformat_new_stream()
     * result (the video path does); a NULL stream would crash in
     * avcodec_copy_context() below. */
    if (g_out_audio_st == NULL) {
        LOGE(1, "Fail to Allocate Output Audio Stream");
        return;
    }
    if (avcodec_copy_context(g_out_audio_st->codec, context->streams[0]->codec) != 0) {
        LOGE(1, "Failed to Copy audio Context");
        return;
    }
    LOGI(1, "Success to Copy audio Context");
    g_out_audio_st->codec->codec_id = g_in_audio_st->codec->codec_id;
    /* Reset the tag so the muxer picks one appropriate for the container. */
    g_out_audio_st->codec->codec_tag = 0;
    g_out_audio_st->pts = g_in_audio_st->pts;
    /* Keep the input stream's time base so audio pts values carry over. */
    g_out_audio_st->time_base.num = g_in_audio_st->time_base.num;
    g_out_audio_st->time_base.den = g_in_audio_st->time_base.den;
    g_out_audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    LOGI(1, "end init audio");
}
/*
 * Write one video packet to the output file.
 *
 * Copies the payload of *pkt into the shared outpkt, rewrites pts/dts onto
 * the hand-maintained video_pts counter, and muxes it with
 * av_interleaved_write_frame().
 *
 * Fixes versus the original:
 *  - The two av_rescale_q() calls discarded their return value — pure
 *    no-ops — and were removed.
 *  - `sizeof(*pkt) == 0` and `!&outpkt` are compile-time-false checks and
 *    were removed (sizeof of a struct is never 0; the address of a local
 *    is never NULL).
 *  - The original forced AV_PKT_FLAG_KEY onto every packet, marking all
 *    frames as keyframes (breaks seeking); the input packet's flags are
 *    copied instead.
 *  - The original could return after the write without reaching
 *    av_free_packet(); cleanup now always runs.
 */
static void write_video_stream(AVPacket *pkt) {
    if (pkt == NULL)
        return;
    av_pkt = pkt;
    av_init_packet(&outpkt);
    if (av_pkt->pts != AV_NOPTS_VALUE) {
        /* Keep the output pts strictly increasing even when several
         * packets arrive between audio updates of video_pts. */
        if (last_video_pts == video_pts) {
            video_pts++;
            last_video_pts = video_pts;
        }
        outpkt.pts = video_pts;
    } else {
        outpkt.pts = AV_NOPTS_VALUE;
    }
    if (av_pkt->dts == AV_NOPTS_VALUE)
        outpkt.dts = AV_NOPTS_VALUE;
    else
        outpkt.dts = video_pts;
    outpkt.data = av_pkt->data;
    outpkt.size = av_pkt->size;
    outpkt.stream_index = av_pkt->stream_index;
    outpkt.flags = av_pkt->flags; /* preserve, do not force keyframe */
    last_video_pts = video_pts;
    if (av_interleaved_write_frame(g_oc, &outpkt) < 0) {
        LOGE(1, "Failed Video Write");
    } else {
        g_out_video_st->codec->frame_number++;
    }
    av_free_packet(&outpkt);
}
/*
 * Write one audio packet to the output file.
 *
 * Copies the payload of *pkt into the shared outpkt, rewrites pts/dts onto
 * the hand-maintained audio_pts counter (also advancing video_pts so the
 * two streams stay loosely coupled), and muxes the packet.
 *
 * Fixes versus the original:
 *  - BRACE BUG: the data/size/stream_index assignments, the write, and
 *    av_free_packet() all lived inside the `else` branch of the dts check,
 *    so any packet whose dts was AV_NOPTS_VALUE was silently dropped and
 *    never freed. The write path now runs unconditionally.
 *  - The discarded (no-op) av_rescale_q() calls and the always-false
 *    `sizeof`/`!&outpkt` checks were removed, as in write_video_stream().
 *  - Input packet flags are copied instead of forcing AV_PKT_FLAG_KEY.
 */
static void write_audio_stream(AVPacket *pkt) {
    if (pkt == NULL)
        return;
    av_pkt = pkt;
    av_init_packet(&outpkt);
    if (av_pkt->pts != AV_NOPTS_VALUE)
        outpkt.pts = audio_pts;
    else
        outpkt.pts = AV_NOPTS_VALUE;
    if (av_pkt->dts == AV_NOPTS_VALUE) {
        outpkt.dts = AV_NOPTS_VALUE;
    } else {
        outpkt.dts = audio_pts;
        /* Keep dts <= pts monotonic and avoid duplicate dts values. */
        if (outpkt.pts >= outpkt.dts)
            outpkt.dts = outpkt.pts;
        if (outpkt.dts == audio_dts)
            outpkt.dts++;
        if (outpkt.pts < outpkt.dts) {
            outpkt.pts = outpkt.dts;
            audio_pts = outpkt.pts;
        }
    }
    outpkt.data = av_pkt->data;
    outpkt.size = av_pkt->size;
    outpkt.stream_index = av_pkt->stream_index;
    outpkt.flags = av_pkt->flags; /* preserve, do not force keyframe */
    /* Drive the video counter from audio so the streams advance together. */
    video_pts = audio_pts;
    audio_pts++;
    if (av_interleaved_write_frame(g_oc, &outpkt) < 0) {
        LOGE(1, "Failed Audio Write");
    } else {
        g_out_audio_st->codec->frame_number++;
    }
    av_free_packet(&outpkt);
}
here result : recorded file
here full source : player.c
I want to record an RTSP stream to a file while playing it.
I tried testing the video and audio streams while changing the parameters,
but the resulting file does not keep video and audio in sync.
I searched for information about ffmpeg, but almost everything I found covered command-line usage or video-only recording.
Please advise me.