
Media (91)
-
Richard Stallman and free software
19 October 2011, by
Updated: May 2013
Language: French
Type: Text
-
Stereo master soundtrack
17 October 2011, by
Updated: October 2011
Language: English
Type: Audio
-
Elephants Dream - Cover of the soundtrack
17 October 2011, by
Updated: October 2011
Language: English
Type: Image
-
#7 Ambience
16 October 2011, by
Updated: June 2015
Language: English
Type: Audio
-
#6 Teaser Music
16 October 2011, by
Updated: February 2013
Language: English
Type: Audio
-
#5 End Title
16 October 2011, by
Updated: February 2013
Language: English
Type: Audio
Other articles (73)
-
Support for all types of media
10 April 2011
Unlike much modern software and other document-sharing platforms, MediaSPIP aims to handle as many different document formats as possible, whether they are: images (png, gif, jpg, bmp and others...); audio (MP3, Ogg, Wav and others...); video (Avi, MP4, Ogv, mpg, mov, wmv and others...); or textual content, code and more (OpenOffice, Microsoft Office (spreadsheets, presentations), web (HTML, CSS), LaTeX, Google Earth) (...)
-
MediaSPIP v0.2
21 June 2013, by
MediaSPIP 0.2 is the first stable version of MediaSPIP.
Its official release date is 21 June 2013, as announced here.
The zip file provided here contains only the MediaSPIP sources, in the standalone version.
As with the previous version, all software dependencies must be installed manually on the server.
If you wish to use this archive for a farm-mode installation, you will also need to make further modifications (...)
-
The farm's regular Cron tasks
1 December 2010, by
Managing the farm relies on running several repetitive tasks, called Cron tasks, at regular intervals.
The super Cron (gestion_mutu_super_cron)
This task, scheduled every minute, simply calls the Cron of every instance in the mutualized farm on a regular basis. Combined with a system Cron on the farm's central site, this generates regular visits to the various sites and prevents the tasks of rarely visited sites from being too (...)
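As an illustration only, such a system Cron entry could look like the line below; the URL is a hypothetical central site, and the exact endpoint depends on the SPIP setup:

# Hypothetical crontab entry: visit the central site every minute so its scheduled tasks run
* * * * * wget -q -O /dev/null http://farm.example.org/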
On other sites (5111)
-
Organic Traffic: What It Is and How to Increase It
19 September 2023, by Erin — Analytics Tips
-
Make AVI file from H264 compressed data
6 April 2017, by vominhtien961476
I'm using the ffmpeg libraries to create an AVI file, following the muxing.c ffmpeg example, as below:
-
Allocate the output media context:
avformat_alloc_output_context2
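For reference, this step is usually a single call; a minimal sketch ("output.avi" is a placeholder filename):

AVFormatContext *oc = NULL;
/* Let ffmpeg pick the AVI muxer and allocate the output context. */
avformat_alloc_output_context2(&oc, NULL, "avi", "output.avi");
if (!oc) {
    /* allocation failed or the format is unknown */
}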
-
Add video streams using the AV_CODEC_ID_H264 codec, with the below set of parameters:
int AddVideoStream(AVStream *&video_st, AVFormatContext *&oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id); // codec_id = AV_CODEC_ID_H264
    if (!(*codec)) {
        sprintf(strError, "Could not find encoder for '%s' line %d\n", avcodec_get_name(codec_id), __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }
    video_st = avformat_new_stream(oc, *codec);
    if (!video_st) {
        sprintf(strError, "Could not allocate stream line %d\n", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }
    video_st->id = oc->nb_streams - 1;
    c = video_st->codec;
    avcodec_get_context_defaults3(c, *codec);
    c->codec_id = codec_id;
    c->bit_rate = 500 * 1000;
    /* Resolution must be a multiple of two. */
    c->width = 1280;
    c->height = 720;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    c->time_base.den = 25 * 1000;
    c->time_base.num = 1000;
    c->gop_size = 12; // (int)(av_q2d(c->time_base) / 2); // GOP size is framerate/2
    c->pix_fmt = STREAM_PIX_FMT;
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    return RS_OK;
}
-
Open the video stream (open_video):
int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;
    char strError[STR_LENGTH_256];
    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        sprintf(strError, "Could not open video codec line %d", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }
    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        sprintf(strError, "Could not allocate video frame line %d", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }
    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        sprintf(strError, "Could not allocate picture line %d", __LINE__);
        commonGlobal->WriteRuntimeBackupLogs(strError);
        return RS_NOT_OK;
    }
    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            sprintf(strError, "Could not allocate temporary picture line %d", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }
    }
    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
    return RS_OK;
}
-
Write the AVI stream header:
avformat_write_header
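A sketch of this step (assuming the output file still has to be opened; error handling abbreviated):

/* AVI is a file-based format, so open the output before writing the header. */
if (!(oc->oformat->flags & AVFMT_NOFILE))
    avio_open(&oc->pb, "output.avi", AVIO_FLAG_WRITE);
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
    /* the muxer rejected the stream parameters */
}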
-
Encode the video frame:
avcodec_encode_video2
Case a: the input here is BRG frames, so I encode them to H264 and pass them on to the next step.
Case b: the input here is H264-compressed frames (captured from an H264 RTP stream), so I skip this step and move on to the next one.
Write the interleaved video frame:
av_interleaved_write_frame(oc, &pkt)
Case a: the packet data encoded in step 5 is written correctly, without error.
Case b: I always get an error from av_interleaved_write_frame, with value -22. It could be EINVAL (invalid argument). Can someone tell me what is wrong, or which parameters I am missing here?
int WriteVideoFrame(AVFormatContext *&oc, AVStream *&st,
                    uint8_t *imageData /* BRG data input */,
                    int width,
                    int height,
                    bool isStart,
                    bool isData,
                    bool isCompressed,
                    AVPacket *packet /* H264 data input */)
{
    int ret = 0;
    char strError[STR_LENGTH_256];
    if (isCompressed == false) // For BRG data
    {
        static struct SwsContext *sws_ctx;
        AVCodecContext *c = st->codec;
        if (isData)
        {
            if (!frame) {
                //fprintf(stderr, "Could not allocate video frame\n");
                return RS_NOT_OK;
            }
            if (isStart == true)
                frame->pts = 0;
            /* Allocate the encoded raw picture. */
            if (width != c->width || height != c->height)
            {
                if (!sws_ctx)
                {
                    sws_ctx = sws_getContext(width, height,
                                             AV_PIX_FMT_BGR24, c->width, c->height,
                                             AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, 0, 0, 0);
                    if (!sws_ctx)
                    {
                        sprintf(strError, "Could not initialize the conversion context line %d\n", __LINE__);
                        commonGlobal->WriteRuntimeBackupLogs(strError);
                        return RS_NOT_OK;
                    }
                }
                uint8_t *inData[1] = { imageData }; // RGB24 has one plane
                int inLinesize[1] = { 3 * width }; // RGB stride
                sws_scale(sws_ctx, inData, inLinesize, 0, height, dst_picture.data, dst_picture.linesize);
            }
            else
                BRG24ToYUV420p(dst_picture.data, imageData, width, height); // Phong Le changed this
        }
        if (oc->oformat->flags & AVFMT_RAWPICTURE)
        {
            /* Raw video case - directly store the picture in the packet */
            AVPacket pkt;
            av_init_packet(&pkt);
            pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data = dst_picture.data[0];
            pkt.size = sizeof(AVPicture);
            ret = av_interleaved_write_frame(oc, &pkt);
            av_free_packet(&pkt);
        }
        else
        {
            /* encode the image */
            AVPacket pkt;
            int got_output;
            av_init_packet(&pkt);
            pkt.data = NULL; // packet data will be allocated by the encoder
            pkt.size = 0;
            ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
            if (ret < 0) {
                sprintf(strError, "Error encoding video frame line %d\n", __LINE__);
                commonGlobal->WriteRuntimeBackupLogs(strError);
                av_free_packet(&pkt);
                return RS_NOT_OK;
            }
            /* If size is zero, it means the image was buffered. */
            if (got_output) {
                if (c->coded_frame->key_frame)
                    pkt.flags |= AV_PKT_FLAG_KEY;
                pkt.stream_index = st->index;
                /* Write the compressed frame to the media file. */
                ret = av_interleaved_write_frame(oc, &pkt);
            }
            else
            {
                ret = 0;
            }
            av_free_packet(&pkt);
        }
        if (ret != 0)
        {
            sprintf(strError, "Error while writing video frame line %d\n", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }
        frame->pts += av_rescale_q(1, st->codec->time_base, st->time_base);
        return RS_OK;
    }
    else /* H264 data */
    {
        if (isStart == true)
            packet->pts = 0;
        else
            packet->pts += av_rescale_q(1, st->codec->time_base, st->time_base);
        ret = av_interleaved_write_frame(oc, packet);
        if (ret < 0)
        {
            sprintf(strError, "Error while writing video frame line %d\n", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }
        return RS_OK;
    }
}
-
Close the file.
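For completeness, closing usually looks like this (sketch):

av_write_trailer(oc);      /* flush delayed packets and write the AVI index */
if (!(oc->oformat->flags & AVFMT_NOFILE))
    avio_close(oc->pb);    /* close the output file */
avformat_free_context(oc); /* free the muxer context */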
-> Case a: the AVI file is created successfully.
-> Case b: it fails.
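For case b, here is a minimal sketch of the packet fields a muxer generally expects on a pre-encoded frame; this is an assumption about the EINVAL, not a confirmed fix. frameIndex and isKeyFrame are hypothetical values coming from the RTP layer, and the stream may additionally need SPS/PPS in its extradata when AVFMT_GLOBALHEADER is set:

/* Sketch: prepare a pre-encoded H264 packet before muxing.
 * 'packet' is assumed to hold one complete H264 access unit. */
packet->stream_index = st->index;  /* must refer to the video stream */
packet->pts = av_rescale_q(frameIndex, st->codec->time_base, st->time_base); /* frameIndex: hypothetical frame counter */
packet->dts = packet->pts;         /* assumes no B-frames */
if (isKeyFrame)                    /* isKeyFrame: hypothetical flag from the RTP depacketizer */
    packet->flags |= AV_PKT_FLAG_KEY;
ret = av_interleaved_write_frame(oc, packet);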
Thanks
Tien Vo
-
Issue after video rotation: how to fix it
2 April 2015, by Vahagn
I have the following code for rotating a video:
OpenCVFrameConverter.ToIplImage converter2 = new OpenCVFrameConverter.ToIplImage();
for (int i = firstIndex; i <= lastIndex; i++) {
long t = timestamps[i % timestamps.length] - startTime;
if (t >= 0) {
if (t > recorder.getTimestamp()) {
recorder.setTimestamp(t);
}
Frame g = converter2.convert(rotate(converter2.convertToIplImage(images[i % images.length]), 90));
recorder.record(g);
}
}
images[i] is a Frame in JavaCV.
After this, the video has green lines.
UPDATE
Conversion function:
/*
* Copyright (C) 2015 Samuel Audet
*
* This file is part of JavaCV.
*
* JavaCV is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version (subject to the "Classpath" exception
* as provided in the LICENSE.txt file that accompanied this code).
*
* JavaCV is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JavaCV. If not, see <http://www.gnu.org/licenses/>.
*/
package com.example.vvardanyan.ffmpeg;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.Pointer;
import java.nio.Buffer;
import static org.bytedeco.javacpp.opencv_core.CV_16S;
import static org.bytedeco.javacpp.opencv_core.CV_16U;
import static org.bytedeco.javacpp.opencv_core.CV_32F;
import static org.bytedeco.javacpp.opencv_core.CV_32S;
import static org.bytedeco.javacpp.opencv_core.CV_64F;
import static org.bytedeco.javacpp.opencv_core.CV_8S;
import static org.bytedeco.javacpp.opencv_core.CV_8U;
import static org.bytedeco.javacpp.opencv_core.CV_MAKETYPE;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_16S;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_16U;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_32F;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_32S;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_64F;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_8S;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_8U;
import static org.bytedeco.javacpp.opencv_core.IplImage;
import static org.bytedeco.javacpp.opencv_core.Mat;
/**
* A utility class to map data between {@link Frame} and {@link IplImage} or {@link Mat}.
* Since this is an abstract class, one must choose between two concrete classes:
* {@link ToIplImage} or {@link ToMat}.
*
* @author Samuel Audet
*/
public abstract class OpenCVFrameConverter<F> extends FrameConverter<F> {
IplImage img;
Mat mat;
public static class ToIplImage extends OpenCVFrameConverter<IplImage> {
@Override public IplImage convert(Frame frame) { return convertToIplImage(frame); }
}
public static class ToMat extends OpenCVFrameConverter<Mat> {
@Override public Mat convert(Frame frame) { return convertToMat(frame); }
}
public static int getFrameDepth(int depth) {
switch (depth) {
case IPL_DEPTH_8U: case CV_8U: return Frame.DEPTH_UBYTE;
case IPL_DEPTH_8S: case CV_8S: return Frame.DEPTH_BYTE;
case IPL_DEPTH_16U: case CV_16U: return Frame.DEPTH_USHORT;
case IPL_DEPTH_16S: case CV_16S: return Frame.DEPTH_SHORT;
case IPL_DEPTH_32F: case CV_32F: return Frame.DEPTH_FLOAT;
case IPL_DEPTH_32S: case CV_32S: return Frame.DEPTH_INT;
case IPL_DEPTH_64F: case CV_64F: return Frame.DEPTH_DOUBLE;
default: return -1;
}
}
public static int getIplImageDepth(Frame frame) {
switch (frame.imageDepth) {
case Frame.DEPTH_UBYTE: return IPL_DEPTH_8U;
case Frame.DEPTH_BYTE: return IPL_DEPTH_8S;
case Frame.DEPTH_USHORT: return IPL_DEPTH_16U;
case Frame.DEPTH_SHORT: return IPL_DEPTH_16S;
case Frame.DEPTH_FLOAT: return IPL_DEPTH_32F;
case Frame.DEPTH_INT: return IPL_DEPTH_32S;
case Frame.DEPTH_DOUBLE: return IPL_DEPTH_64F;
default: return -1;
}
}
static boolean isEqual(Frame frame, IplImage img) {
return img != null && frame != null && frame.image != null && frame.image.length > 0
&& frame.imageWidth == img.width() && frame.imageHeight == img.height()
&& frame.imageChannels == img.nChannels() && getIplImageDepth(frame) == img.depth()
&& new Pointer(frame.image[0]).address() == img.imageData().address()
&& frame.imageStride * Math.abs(frame.imageDepth) / 8 == img.widthStep();
}
public IplImage convertToIplImage(Frame frame) {
if (frame == null) {
return null;
} else if (frame.opaque instanceof IplImage) {
return (IplImage)frame.opaque;
} else if (!isEqual(frame, img)) {
int depth = getIplImageDepth(frame);
img = depth < 0 ? null : IplImage.createHeader(frame.imageWidth, frame.imageHeight, depth, frame.imageChannels)
.imageData(new BytePointer(new Pointer(frame.image[0].position(0)))).widthStep(frame.imageStride * Math.abs(frame.imageDepth) / 8);
}
return img;
}
public Frame convert(IplImage img) {
if (img == null) {
return null;
} else if (!isEqual(frame, img)) {
frame = new Frame();
frame.imageWidth = img.width();
frame.imageHeight = img.height();
frame.imageDepth = getFrameDepth(img.depth());
frame.imageChannels = img.nChannels();
frame.imageStride = img.widthStep() * 8 / Math.abs(frame.imageDepth);
frame.image = new Buffer[] { img.createBuffer() };
frame.opaque = img;
}
return frame;
}
public static int getMatDepth(Frame frame) {
switch (frame.imageDepth) {
case Frame.DEPTH_UBYTE: return CV_8U;
case Frame.DEPTH_BYTE: return CV_8S;
case Frame.DEPTH_USHORT: return CV_16U;
case Frame.DEPTH_SHORT: return CV_16S;
case Frame.DEPTH_FLOAT: return CV_32F;
case Frame.DEPTH_INT: return CV_32S;
case Frame.DEPTH_DOUBLE: return CV_64F;
default: return -1;
}
}
static boolean isEqual(Frame frame, Mat mat) {
return mat != null && frame != null && frame.image != null && frame.image.length > 0
&& frame.imageWidth == mat.cols() && frame.imageHeight == mat.rows()
&& frame.imageChannels == mat.channels() && getMatDepth(frame) == mat.depth()
&& new Pointer(frame.image[0]).address() == mat.data().address()
&& frame.imageStride * Math.abs(frame.imageDepth) / 8 == (int)mat.step();
}
public Mat convertToMat(Frame frame) {
if (frame == null) {
return null;
} else if (frame.opaque instanceof Mat) {
return (Mat)frame.opaque;
} else if (!isEqual(frame, mat)) {
int depth = getMatDepth(frame);
mat = depth < 0 ? null : new Mat(frame.imageHeight, frame.imageWidth, CV_MAKETYPE(depth, frame.imageChannels),
new Pointer(frame.image[0].position(0)), frame.imageStride * Math.abs(frame.imageDepth) / 8);
}
return mat;
}
public Frame convert(Mat mat) {
if (mat == null) {
return null;
} else if (!isEqual(frame, mat)) {
frame = new Frame();
frame.imageWidth = mat.cols();
frame.imageHeight = mat.rows();
frame.imageDepth = getFrameDepth(mat.depth());
frame.imageChannels = mat.channels();
frame.imageStride = (int)mat.step() * 8 / Math.abs(frame.imageDepth);
frame.image = new Buffer[] { mat.createBuffer() };
frame.opaque = mat;
}
return frame;
}
}