
Media (91)
-
Les Miserables
9 December 2019, by
Updated: December 2019
Language: French
Type: Text
-
VideoHandle
8 November 2019, by
Updated: November 2019
Language: French
Type: Video
-
Somos millones 1
21 July 2014, by
Updated: June 2015
Language: French
Type: Video
-
Un test - mauritanie
3 April 2014, by
Updated: April 2014
Language: French
Type: Text
-
Pourquoi Obama lit il mes mails ?
4 February 2014, by
Updated: February 2014
Language: French
-
IMG 0222
6 October 2013, by
Updated: October 2013
Language: French
Type: Image
Other articles (35)
-
Websites made with MediaSPIP
2 May 2011, by
This page lists some websites based on MediaSPIP.
-
Creating farms of unique websites
13 April 2011, by
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; and creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
-
Contribute to a better visual interface
13 April 2011
MediaSPIP is based on a system of themes and templates. Templates define the placement of information on the page and can be adapted to a wide range of uses. Themes define the overall graphic appearance of the site.
Anyone can submit a new graphic theme or template and make it available to the MediaSPIP community.
On other sites (5684)
-
Live555: X264 Stream Live source based on "testOnDemandRTSPServer"
12 January 2017, by user2660369
I am trying to create an RTSP server that streams the OpenGL output of my program. I had a look at "How to write a Live555 FramedSource to allow me to stream H.264 live", but I need the stream to be unicast, so I had a look at testOnDemandRTSPServer. Using the same code fails. To my understanding, I need to provide memory in which I store my H.264 frames so the on-demand server can read them on demand.
H264VideoStreamServerMediaSubsession.cpp
H264VideoStreamServerMediaSubsession*
H264VideoStreamServerMediaSubsession::createNew(UsageEnvironment& env,
Boolean reuseFirstSource) {
return new H264VideoStreamServerMediaSubsession(env, reuseFirstSource);
}
H264VideoStreamServerMediaSubsession::H264VideoStreamServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource)
: OnDemandServerMediaSubsession(env, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
}
H264VideoStreamServerMediaSubsession::~H264VideoStreamServerMediaSubsession() {
delete[] fAuxSDPLine;
}
static void afterPlayingDummy(void* clientData) {
H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
subsess->afterPlayingDummy1();
}
void H264VideoStreamServerMediaSubsession::afterPlayingDummy1() {
// Unschedule any pending 'checking' task:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
// Signal the event loop that we're done:
setDoneFlag();
}
static void checkForAuxSDPLine(void* clientData) {
H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
subsess->checkForAuxSDPLine1();
}
void H264VideoStreamServerMediaSubsession::checkForAuxSDPLine1() {
char const* dasl;
if (fAuxSDPLine != NULL) {
// Signal the event loop that we're done:
setDoneFlag();
} else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
fAuxSDPLine = strDup(dasl);
fDummyRTPSink = NULL;
// Signal the event loop that we're done:
setDoneFlag();
} else {
// try again after a brief delay:
int uSecsToDelay = 100000; // 100 ms
nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
(TaskFunc*)checkForAuxSDPLine, this);
}
}
char const* H264VideoStreamServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)
if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
// Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
// until we start reading the file. This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
// and we need to start reading data from our file until this changes.
fDummyRTPSink = rtpSink;
// Start reading the file:
fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);
// Check whether the sink's 'auxSDPLine()' is ready:
checkForAuxSDPLine(this);
}
envir().taskScheduler().doEventLoop(&fDoneFlag);
return fAuxSDPLine;
}
FramedSource* H264VideoStreamServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
estBitrate = 500; // kb
megamol::remotecontrol::View3D_MRC *parent = (megamol::remotecontrol::View3D_MRC*)this->parent;
return H264VideoStreamFramer::createNew(envir(), parent->h264FramedSource);
}
RTPSink* H264VideoStreamServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {
return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}
FramedSource.cpp
H264FramedSource* H264FramedSource::createNew(UsageEnvironment& env,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
{
return new H264FramedSource(env, preferredFrameSize, playTimePerFrame);
}
H264FramedSource::H264FramedSource(UsageEnvironment& env,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
: FramedSource(env),
fPreferredFrameSize(fMaxSize),
fPlayTimePerFrame(playTimePerFrame),
fLastPlayTime(0),
fCurIndex(0)
{
x264_param_default_preset(&param, "veryfast", "zerolatency");
param.i_threads = 1;
param.i_width = 1024;
param.i_height = 768;
param.i_fps_num = 30;
param.i_fps_den = 1;
// Intra refres:
param.i_keyint_max = 60;
param.b_intra_refresh = 1;
//Rate control:
param.rc.i_rc_method = X264_RC_CRF;
param.rc.f_rf_constant = 25;
param.rc.f_rf_constant_max = 35;
param.i_sps_id = 7;
//For streaming:
param.b_repeat_headers = 1;
param.b_annexb = 1;
x264_param_apply_profile(&param, "baseline");
param.i_log_level = X264_LOG_ERROR;
encoder = x264_encoder_open(&param);
pic_in.i_type = X264_TYPE_AUTO;
pic_in.i_qpplus1 = 0;
pic_in.img.i_csp = X264_CSP_I420;
pic_in.img.i_plane = 3;
x264_picture_alloc(&pic_in, X264_CSP_I420, 1024, 768);
convertCtx = sws_getContext(1024, 768, PIX_FMT_RGBA, 1024, 768, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
H264FramedSource::~H264FramedSource()
{
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
void H264FramedSource::AddToBuffer(uint8_t* buf, int surfaceSizeInBytes)
{
uint8_t* surfaceData = (new uint8_t[surfaceSizeInBytes]);
memcpy(surfaceData, buf, surfaceSizeInBytes);
int srcstride = 1024*4;
sws_scale(convertCtx, &surfaceData, &srcstride,0, 768, pic_in.img.plane, pic_in.img.i_stride);
x264_nal_t* nals = NULL;
int i_nals = 0;
int frame_size = -1;
frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);
static bool finished = false;
if (frame_size >= 0)
{
static bool alreadydone = false;
if(!alreadydone)
{
x264_encoder_headers(encoder, &nals, &i_nals);
alreadydone = true;
}
for(int i = 0; i < i_nals; ++i)
{
m_queue.push(nals[i]);
}
}
delete [] surfaceData;
surfaceData = nullptr;
envir().taskScheduler().triggerEvent(eventTriggerId, this);
}
void H264FramedSource::doGetNextFrame()
{
deliverFrame();
}
void H264FramedSource::deliverFrame0(void* clientData)
{
((H264FramedSource*)clientData)->deliverFrame();
}
void H264FramedSource::deliverFrame()
{
x264_nal_t nalToDeliver;
if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous data:
unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}
// Remember the play time of this data:
fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
fDurationInMicroseconds = fLastPlayTime;
} else {
// We don't know a specific play time duration for this data,
// so just record the current time as being the 'presentation time':
gettimeofday(&fPresentationTime, NULL);
}
if(!m_queue.empty())
{
m_queue.wait_and_pop(nalToDeliver);
uint8_t* newFrameDataStart = (uint8_t*)0xD15EA5E;
newFrameDataStart = (uint8_t*)(nalToDeliver.p_payload);
unsigned newFrameSize = nalToDeliver.i_payload;
// Deliver the data here:
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
}
else {
fFrameSize = newFrameSize;
}
memcpy(fTo, nalToDeliver.p_payload, nalToDeliver.i_payload);
FramedSource::afterGetting(this);
}
}
Relevant part of the RTSP server thread:
RTSPServer* rtspServer = RTSPServer::createNew(*(parent->env), 8554, NULL);
if (rtspServer == NULL) {
*(parent->env) << "Failed to create RTSP server: " << (parent->env)->getResultMsg() << "\n";
exit(1);
}
char const* streamName = "Stream";
parent->h264FramedSource = H264FramedSource::createNew(*(parent->env), 0, 0);
H264VideoStreamServerMediaSubsession *h264VideoStreamServerMediaSubsession = H264VideoStreamServerMediaSubsession::createNew(*(parent->env), true);
h264VideoStreamServerMediaSubsession->parent = parent;
sms->addSubsession(h264VideoStreamServerMediaSubsession);
rtspServer->addServerMediaSession(sms);
parent->env->taskScheduler().doEventLoop(); // does not return
Once a connection exists, the render loop calls:
h264FramedSource->AddToBuffer(videoData, 1024*768*4);
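The code above relies on an m_queue member whose type is not shown in the question. Since push() is called from the render thread (via AddToBuffer) and empty()/wait_and_pop() from the Live555 event-loop thread (via deliverFrame), it must be a thread-safe queue. A minimal sketch of one possible implementation, assuming C++11 (the class name and layout are hypothetical, not part of Live555 or x264):

#include <condition_variable>
#include <mutex>
#include <queue>

// Hypothetical thread-safe queue matching the push / empty / wait_and_pop
// calls used by H264FramedSource above.
template <typename T>
class ConcurrentQueue {
public:
    void push(const T& value) {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mQueue.push(value);
        }
        mCond.notify_one(); // wake a consumer blocked in wait_and_pop()
    }

    bool empty() const {
        std::lock_guard<std::mutex> lock(mMutex);
        return mQueue.empty();
    }

    void wait_and_pop(T& value) {
        std::unique_lock<std::mutex> lock(mMutex);
        mCond.wait(lock, [this] { return !mQueue.empty(); });
        value = mQueue.front();
        mQueue.pop();
    }

private:
    mutable std::mutex mMutex;
    std::condition_variable mCond;
    std::queue<T> mQueue;
};

Note that queueing x264_nal_t by value only copies the p_payload pointer; x264 reuses that buffer on the next x264_encoder_encode() call, so a robust version would deep-copy the payload bytes when enqueueing.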
-
Getting “field has incomplete type” and "conflicting types"
4 November 2013, by Viet
I'm trying to build pjsip from source with video support using gcc on Ubuntu. After I successfully run ./configure and make dep, I run make and get the errors below:
../src/pjmedia/ffmpeg_util.c:46:18: error: field ‘codec_id’ has incomplete type
../src/pjmedia/ffmpeg_util.c:148:13: error: conflicting types for ‘pjmedia_format_id_to_CodecID’
../src/pjmedia/ffmpeg_util.h:23:13: note: previous declaration of ‘pjmedia_format_id_to_CodecID’ was here
../src/pjmedia/ffmpeg_util.c: In function ‘pjmedia_format_id_to_CodecID’:
../src/pjmedia/ffmpeg_util.c:154:35: warning: comparison between pointer and integer [enabled by default]
../src/pjmedia/ffmpeg_util.c:155:6: error: dereferencing pointer to incomplete type
../src/pjmedia/ffmpeg_util.c:155:6: warning: statement with no effect [-Wunused-value]
../src/pjmedia/ffmpeg_util.c:160:5: error: dereferencing pointer to incomplete type
../src/pjmedia/ffmpeg_util.c:160:5: warning: statement with no effect [-Wunused-value]
../src/pjmedia/ffmpeg_util.c: At top level:
../src/pjmedia/ffmpeg_util.c:164:55: error: parameter 1 (‘codec_id’) has incomplete type
Here is the code in ffmpeg_util.h and ffmpeg_util.c:
ffmpeg_util.h
#ifndef __PJMEDIA_FFMPEG_UTIL_H__
#define __PJMEDIA_FFMPEG_UTIL_H__
#include <pjmedia/format.h>
#ifdef _MSC_VER
# ifndef __cplusplus
# define inline _inline
# endif
# pragma warning(disable:4244) /* possible loss of data */
#endif
#include <libavutil/avutil.h>
#include <libavcodec/avcodec.h>
void pjmedia_ffmpeg_add_ref();
void pjmedia_ffmpeg_dec_ref();
pj_status_t pjmedia_format_id_to_PixelFormat(pjmedia_format_id fmt_id,
enum PixelFormat *pixel_format);
pj_status_t PixelFormat_to_pjmedia_format_id(enum PixelFormat pf,
pjmedia_format_id *fmt_id);
pj_status_t pjmedia_format_id_to_CodecID(pjmedia_format_id fmt_id,
enum CodecID *codec_id);
pj_status_t CodecID_to_pjmedia_format_id(enum CodecID codec_id,
pjmedia_format_id *fmt_id);
#endif /* __PJMEDIA_FFMPEG_UTIL_H__ */
ffmpeg_util.c
#include <pjmedia/types.h>
#include <pj/errno.h>
#include <pj/log.h>
#include <pj/string.h>
#if PJMEDIA_HAS_LIBAVFORMAT && PJMEDIA_HAS_LIBAVUTIL
#include "ffmpeg_util.h"
#include <libavformat/avformat.h>
#define MAKE_VER(mj,mn,mi) ((mj << 16) | (mn << 8) | (mi << 0))
#define VER_AT_LEAST(mj,mn,mi) (MAKE_VER(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, \
LIBAVUTIL_VERSION_MICRO) >= \
MAKE_VER(mj,mn,mi))
/* Conversion table between pjmedia_format_id and PixelFormat */
static const struct ffmpeg_fmt_table_t
{
pjmedia_format_id id;
enum PixelFormat pf;
} ffmpeg_fmt_table[] =
{
{ PJMEDIA_FORMAT_RGBA, PIX_FMT_RGBA},
{ PJMEDIA_FORMAT_RGB24,PIX_FMT_BGR24},
{ PJMEDIA_FORMAT_BGRA, PIX_FMT_BGRA},
#if VER_AT_LEAST(51,20,1)
{ PJMEDIA_FORMAT_GBRP, PIX_FMT_GBR24P},
#endif
{ PJMEDIA_FORMAT_AYUV, PIX_FMT_NONE},
{ PJMEDIA_FORMAT_YUY2, PIX_FMT_YUYV422},
{ PJMEDIA_FORMAT_UYVY, PIX_FMT_UYVY422},
{ PJMEDIA_FORMAT_I420, PIX_FMT_YUV420P},
//{ PJMEDIA_FORMAT_YV12, PIX_FMT_YUV420P},
{ PJMEDIA_FORMAT_I422, PIX_FMT_YUV422P},
{ PJMEDIA_FORMAT_I420JPEG, PIX_FMT_YUVJ420P},
{ PJMEDIA_FORMAT_I422JPEG, PIX_FMT_YUVJ422P},
};
/* Conversion table between pjmedia_format_id and CodecID */
static const struct ffmpeg_codec_table_t
{
pjmedia_format_id id;
enum CodecID codec_id;
} ffmpeg_codec_table[] =
{
{PJMEDIA_FORMAT_H261, CODEC_ID_H261},
{PJMEDIA_FORMAT_H263, CODEC_ID_H263},
{PJMEDIA_FORMAT_H263P, CODEC_ID_H263P},
{PJMEDIA_FORMAT_H264, CODEC_ID_H264},
{PJMEDIA_FORMAT_MPEG1VIDEO, CODEC_ID_MPEG1VIDEO},
{PJMEDIA_FORMAT_MPEG2VIDEO, CODEC_ID_MPEG2VIDEO},
{PJMEDIA_FORMAT_MPEG4, CODEC_ID_MPEG4},
{PJMEDIA_FORMAT_MJPEG, CODEC_ID_MJPEG}
};
static int pjmedia_ffmpeg_ref_cnt;
static void ffmpeg_log_cb(void* ptr, int level, const char* fmt, va_list vl);
void pjmedia_ffmpeg_add_ref()
{
if (pjmedia_ffmpeg_ref_cnt++ == 0) {
av_log_set_level(AV_LOG_ERROR);
av_log_set_callback(&ffmpeg_log_cb);
av_register_all();
}
}
void pjmedia_ffmpeg_dec_ref()
{
if (pjmedia_ffmpeg_ref_cnt-- == 1) {
/* How to shutdown ffmpeg? */
}
if (pjmedia_ffmpeg_ref_cnt < 0) pjmedia_ffmpeg_ref_cnt = 0;
}
static void ffmpeg_log_cb(void* ptr, int level, const char* fmt, va_list vl)
{
const char *LOG_SENDER = "ffmpeg";
enum { LOG_LEVEL = 5 };
char buf[100];
int bufsize = sizeof(buf), len;
pj_str_t fmt_st;
/* Custom callback needs to filter log level by itself */
if (level > av_log_get_level())
return;
/* Add original ffmpeg sender to log format */
if (ptr) {
AVClass* avc = *(AVClass**)ptr;
len = pj_ansi_snprintf(buf, bufsize, "%s: ", avc->item_name(ptr));
bufsize -= len;
}
/* Copy original log format */
len = pj_ansi_strlen(fmt);
if (len > bufsize-1)
len = bufsize-1;
pj_memcpy(buf+sizeof(buf)-bufsize, fmt, len);
bufsize -= len;
/* Trim log format */
pj_strset(&fmt_st, buf, sizeof(buf)-bufsize);
pj_strrtrim(&fmt_st);
buf[fmt_st.slen] = '\0';
pj_log(LOG_SENDER, LOG_LEVEL, buf, vl);
}
pj_status_t pjmedia_format_id_to_PixelFormat(pjmedia_format_id fmt_id,
enum PixelFormat *pixel_format)
{
unsigned i;
for (i=0; i<PJ_ARRAY_SIZE(ffmpeg_fmt_table); ++i) {
const struct ffmpeg_fmt_table_t *t = &ffmpeg_fmt_table[i];
if (t->id==fmt_id && t->pf != PIX_FMT_NONE) {
*pixel_format = t->pf;
return PJ_SUCCESS;
}
}
*pixel_format = PIX_FMT_NONE;
return PJ_ENOTFOUND;
}
pj_status_t PixelFormat_to_pjmedia_format_id(enum PixelFormat pf,
pjmedia_format_id *fmt_id)
{
unsigned i;
for (i=0; i<PJ_ARRAY_SIZE(ffmpeg_fmt_table); ++i) {
const struct ffmpeg_fmt_table_t *t = &ffmpeg_fmt_table[i];
if (t->pf == pf) {
if (fmt_id) *fmt_id = t->id;
return PJ_SUCCESS;
}
}
return PJ_ENOTFOUND;
}
pj_status_t pjmedia_format_id_to_CodecID(pjmedia_format_id fmt_id,
enum CodecID *codec_id)
{
unsigned i;
for (i=0; i<PJ_ARRAY_SIZE(ffmpeg_codec_table); ++i) {
const struct ffmpeg_codec_table_t *t = &ffmpeg_codec_table[i];
if (t->id==fmt_id && t->codec_id != PIX_FMT_NONE) {
*codec_id = t->codec_id;
return PJ_SUCCESS;
}
}
*codec_id = PIX_FMT_NONE;
return PJ_ENOTFOUND;
}
pj_status_t CodecID_to_pjmedia_format_id(enum CodecID codec_id,
pjmedia_format_id *fmt_id)
{
unsigned i;
for (i=0; i<PJ_ARRAY_SIZE(ffmpeg_codec_table); ++i) {
const struct ffmpeg_codec_table_t *t = &ffmpeg_codec_table[i];
if (t->codec_id == codec_id) {
if (fmt_id) *fmt_id = t->id;
return PJ_SUCCESS;
}
}
return PJ_ENOTFOUND;
}
#ifdef _MSC_VER
# pragma comment( lib, "avformat.lib")
# pragma comment( lib, "avutil.lib")
#endif
#endif /* #if PJMEDIA_HAS_LIBAVFORMAT && PJMEDIA_HAS_LIBAVUTIL */
Help me fix this error!
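A likely cause, given the date and a recent FFmpeg install: from FFmpeg 2.0 (libavcodec 55) onward, enum CodecID and the CODEC_ID_* constants were renamed to AVCodecID and AV_CODEC_ID_*, so a declaration like "enum CodecID codec_id" names an enum that is never defined, which is exactly the "incomplete type" the compiler reports. If that is the problem here, one workaround is a compatibility shim mapping the old names onto the new ones. The sketch below is hypothetical and covers only the identifiers used in ffmpeg_util.h/.c; it would go near the top of ffmpeg_util.h, after avcodec.h is included:

#include <libavcodec/avcodec.h>

/* Hypothetical compatibility shim for FFmpeg >= 2.0 (libavcodec 55), where
 * "enum CodecID" and the CODEC_ID_* constants were renamed. Maps the old
 * identifiers used by this pjsip version onto their current names. */
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 0, 0)
#  define CodecID             AVCodecID
#  define CODEC_ID_H261       AV_CODEC_ID_H261
#  define CODEC_ID_H263       AV_CODEC_ID_H263
#  define CODEC_ID_H263P      AV_CODEC_ID_H263P
#  define CODEC_ID_H264       AV_CODEC_ID_H264
#  define CODEC_ID_MPEG1VIDEO AV_CODEC_ID_MPEG1VIDEO
#  define CODEC_ID_MPEG2VIDEO AV_CODEC_ID_MPEG2VIDEO
#  define CODEC_ID_MPEG4      AV_CODEC_ID_MPEG4
#  define CODEC_ID_MJPEG      AV_CODEC_ID_MJPEG
#endif

The cleaner long-term fix is to upgrade to a pjsip release that already supports the renamed API, or to build against the FFmpeg version this pjsip snapshot was written for.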
-
FFMPEG - avcodec_decode_video2 return "Invalid frame dimensions 0x0"
20 October 2014, by user3004612
I am trying to build a simple FFmpeg MPEG-2 video PES decoder on Android, based on ffmpeg/doc/example/decoding__encoding_8c-source.html.
I am using FFmpeg version 2.0!
I initialize the FFmpeg system with the following code:
int VideoPlayer::_setupFFmpeg()
{
int rc;
AVCodec *codec;
av_register_all();
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
if(NULL == codec){
LOGE("_setupFFmpeg. No codec!");
return -1;
}
LOGI("found: %p. name: %s", codec, codec->name);
_video_dec_ctx = avcodec_alloc_context3(codec);
if(NULL == _video_dec_ctx){
LOGE("_setupFFmpeg. Could not allocate codec context space!");
return -1;
}
_video_dec_ctx->debug = FF_DEBUG_PICT_INFO;
if(codec->capabilities & CODEC_CAP_TRUNCATED) _video_dec_ctx->flags |= CODEC_FLAG_TRUNCATED;
rc = avcodec_open2(_video_dec_ctx, codec, NULL);
if(rc < 0) {
LOGE("_setupFFmpeg. Could not open the codec: %s!", _video_dec_ctx->codec->name);
return -1;
}
av_init_packet(&_avpkt);
if(NULL == _video_frame) _video_frame = avcodec_alloc_frame();
if(NULL == _video_frame){
LOGE("_setupFFmpeg. Could not allocate memory for the video frame!");
return -1;
}
LOGI("_setupFFmpeg(exit)");
return 0;
}
Then I have a loop that continuously sends PES packets to the decoder by calling this function:
int VideoPlayer::_playVideoPacket(PesPacket *packet)
{
int len, frameReady;
_avpkt.data = packet->buf;
_avpkt.size = packet->info.bufferSize;
while(_avpkt.size){
len = avcodec_decode_video2(_video_dec_ctx, _video_frame, &frameReady, &_avpkt);
if(len < 0){
LOGE("FFW_decodeVideo");
return len;
}
if(frameReady){
//Draw the picture...
}
_avpkt.size -= len;
_avpkt.data += len;
}
return 1;
}
But when I run the code I get an "Invalid frame dimensions 0x0" error from FFmpeg after calling avcodec_decode_video2(). It seems that I am not setting up the codec correctly: the MpegEncContext inside the Mpeg1Context in mpeg12dec.c is not set up correctly. What do I have to do to set up MpegEncContext correctly?