
Recherche avancée
Médias (91)
-
MediaSPIP Simple : futur thème graphique par défaut ?
26 septembre 2013, par
Mis à jour : Octobre 2013
Langue : français
Type : Video
-
avec chosen
13 septembre 2013, par
Mis à jour : Septembre 2013
Langue : français
Type : Image
-
sans chosen
13 septembre 2013, par
Mis à jour : Septembre 2013
Langue : français
Type : Image
-
config chosen
13 septembre 2013, par
Mis à jour : Septembre 2013
Langue : français
Type : Image
-
SPIP - plugins - embed code - Exemple
2 septembre 2013, par
Mis à jour : Septembre 2013
Langue : français
Type : Image
-
GetID3 - Bloc informations de fichiers
9 avril 2013, par
Mis à jour : Mai 2013
Langue : français
Type : Image
Autres articles (66)
-
Soumettre améliorations et plugins supplémentaires
10 avril 2011 — Si vous avez développé une nouvelle extension permettant d’ajouter une ou plusieurs fonctionnalités utiles à MediaSPIP, faites-le-nous savoir et son intégration dans la distribution officielle sera envisagée.
Vous pouvez utiliser la liste de discussion de développement afin de le faire savoir ou demander de l’aide quant à la réalisation de ce plugin. MediaSPIP étant basé sur SPIP, il est également possible d’utiliser la liste de discussion SPIP-zone de SPIP pour (...) -
Le profil des utilisateurs
12 avril 2011, parChaque utilisateur dispose d’une page de profil lui permettant de modifier ses informations personnelle. Dans le menu de haut de page par défaut, un élément de menu est automatiquement créé à l’initialisation de MediaSPIP, visible uniquement si le visiteur est identifié sur le site.
L’utilisateur a accès à la modification de profil depuis sa page auteur, un lien dans la navigation "Modifier votre profil" est (...) -
XMP PHP
13 mai 2011, parDixit Wikipedia, XMP signifie :
Extensible Metadata Platform ou XMP est un format de métadonnées basé sur XML utilisé dans les applications PDF, de photographie et de graphisme. Il a été lancé par Adobe Systems en avril 2001 en étant intégré à la version 5.0 d’Adobe Acrobat.
Étant basé sur XML, il gère un ensemble de tags dynamiques pour l’utilisation dans le cadre du Web sémantique.
XMP permet d’enregistrer sous forme d’un document XML des informations relatives à un fichier : titre, auteur, historique (...)
Sur d’autres sites (6303)
-
iOS allocation grow using x264 encoding
19 juillet 2013, par cssmhyl — I get the video YUV data in a callback and save the image data as NSData. Then I wrap the plane data in NSData objects and put the array into a queue (NSMutableArray). Here is the code:
// AVCaptureVideoDataOutput delegate callback (question code, pre-ARC — note the
// manual -release calls). Copies both planes of the captured frame into NSData
// objects and enqueues them, together with the inter-frame gap and the previous
// frame's capture tick, for the x264 encoder thread to consume.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
if ([Application sharedInstance].isRecording) {
if (captureOutput == self.captureManager.videOutput) {
// Tick for this frame; allSpace = elapsed time since the previous frame.
uint64_t capturedHostTime = [self GetTickCount];
int allSpace = capturedHostTime - lastCapturedHostTime;
NSNumber *spaces = [[NSNumber alloc] initWithInt:allSpace];
NSNumber *startTime = [[NSNumber alloc] initWithUnsignedLongLong:lastCapturedHostTime];
lastCapturedHostTime = capturedHostTime;
CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// Plane base addresses are only valid while the pixel buffer is locked.
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
uint8_t *baseAddress0 = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
uint8_t *baseAddress1 = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
size_t width = CVPixelBufferGetWidth(pixelBuffer);
size_t height = CVPixelBufferGetHeight(pixelBuffer);
// NOTE(review): these lengths assume a bi-planar 4:2:0 buffer (full-size Y plane,
// half-size interleaved CbCr plane) with no row padding — CVPixelBufferGetBytesPerRowOfPlane()
// is ignored. TODO confirm the configured pixel format; padded rows would corrupt the copy.
NSData *baseAddress0Data = [[NSData alloc] initWithBytes:baseAddress0 length:width*height];
NSData *baseAddress1Data = [[NSData alloc] initWithBytes:baseAddress1 length:width*height/2];
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
// The array retains all four objects, so the locals are released immediately after.
NSArray *array = [[NSArray alloc] initWithObjects:baseAddress0Data,baseAddress1Data,spaces,startTime ,nil];
[baseAddress0Data release];
[baseAddress1Data release];
[spaces release];
[startTime release];
// Queue access is serialized against the encoder thread via @synchronized.
@synchronized([Application sharedInstance].pearVideoQueue){
[[Application sharedInstance] enqueuePearVideo:[Application sharedInstance].pearVideoQueue withData:array];
[array release];
}
}
}
}now,I run an operation and get data from the queue ,then encode them by x264.I destory de array after encoding.
// Encoder operation loop (question code): drains the shared frame queue and hands
// each dequeued frame array to -encodeToH264: until recording stops or the queue
// object itself goes away.
- (void)main{
while ([Application sharedInstance].pearVideoQueue) {
if (![Application sharedInstance].isRecording) {
NSLog(@"encode operation break");
break;
}
if (![[Application sharedInstance].pearVideoQueue isQueueEmpty]) {
NSArray *pearVideoArray;
// Dequeue + encode under the same lock the capture callback uses for enqueueing.
// NOTE(review): encoding inside the @synchronized block keeps the producer
// blocked for the whole encode — visible here, worth confirming it is intended.
@synchronized([Application sharedInstance].pearVideoQueue){
pearVideoArray = [[Application sharedInstance].pearVideoQueue dequeue];
[[Application sharedInstance] encodeToH264:pearVideoArray];
[pearVideoArray release];
pearVideoArray = nil;
}
} else{
// Queue empty — back off briefly instead of busy-spinning.
[NSThread sleepForTimeInterval:0.01];
}
}
}this is encoding method
// x264 encoding path (question code). Unpacks [Y plane, CbCr plane, frame gap,
// capture tick] from `array`, copies the planes into the module-level x264 picture
// (p264Pic), encodes one frame and dispatches the resulting NALUs by type.
// NOTE(review): several lines below are visibly corrupted by HTML scraping — both
// `for` headers (see inline notes) lost their condition/body text. Compare against
// the original post before reusing any of this.
- (void)encodeX264:(NSArray *)array{
int i264Nal;
x264_picture_t pic_out;
x264_nal_t *p264Nal;
// Array layout (matches the capture callback): [0] Y data, [1] CbCr data,
// [2] inter-frame gap, last = capture tick.
NSNumber *st = [array lastObject];
NSNumber *sp = [array objectAtIndex:2];
uint64_t startTime = [st unsignedLongLongValue];
int spaces = [sp intValue];
NSData *baseAddress0Data = [array objectAtIndex:0];
NSData *baseAddress1Data = [array objectAtIndex:1];
const char *baseAddress0 = baseAddress0Data.bytes;
const char *baseAddress1 = baseAddress1Data.bytes;
if (baseAddress0 == nil) {
return;
}
// Copy luma, then de-interleave the CbCr plane into x264's planar U and V planes.
memcpy(p264Pic->img.plane[0], baseAddress0, PRESENT_FRAME_WIDTH*PRESENT_FRAME_HEIGHT);
uint8_t * pDst1 = p264Pic->img.plane[1];
uint8_t * pDst2 = p264Pic->img.plane[2];
for( int i = 0; i < PRESENT_FRAME_WIDTH*PRESENT_FRAME_HEIGHT/4; i ++ )
{
*pDst1++ = *baseAddress1++;
*pDst2++ = *baseAddress1++;
}
// NOTE(review): "/n" in the message below is presumably a typo for "\n".
if( x264_encoder_encode( p264Handle, &p264Nal, &i264Nal, p264Pic ,&pic_out) < 0 )
{
fprintf( stderr, "x264_encoder_encode failed/n" );
}
// NOTE(review): this reset discards the NAL count returned by the encoder and
// makes the entire `if (i264Nal > 0)` branch below unreachable as written.
i264Nal = 0;
if (i264Nal > 0) {
int i_size;
int spslen =0;
unsigned char spsData[1024];
char * data = (char *)szBuffer+100;
memset(szBuffer, 0, sizeof(szBuffer));
// First SPS seen: record the stream start tick if audio hasn't started already.
if (ifFirstSps) {
ifFirstSps = NO;
if (![Application sharedInstance].ifAudioStarted) {
NSLog(@"video first");
[Application sharedInstance].startTick = startTime;
NSLog(@"startTick: %llu",startTime);
[Application sharedInstance].ifVideoStarted = YES;
}
}
// NOTE(review): garbled loop header — likely originally iterated over the NALUs
// and grew p264Handle->nal_buffer when too small. Do not trust as-is.
for (int i=0 ; inal_buffer_size < p264Nal[i].i_payload*3/2+4) {
p264Handle->nal_buffer_size = p264Nal[i].i_payload*2+4;
x264_free( p264Handle->nal_buffer );
p264Handle->nal_buffer = x264_malloc( p264Handle->nal_buffer_size );
}
i_size = p264Nal[i].i_payload;
memcpy(data, p264Nal[i].p_payload, p264Nal[i].i_payload);
int splitNum = 0;
// NOTE(review): second garbled loop header; the body interpolates a per-NALU
// timestamp from the inter-frame gap, then dispatches by NALU type.
for (int i=0; i=1) {
timeSpace = spaces/(i264Nal-1)*i;
}else{
timeSpace = spaces/i264Nal*i;
}
int timeStamp = startTime-[Application sharedInstance].startTick + timeSpace;
switch (type) {
case NALU_TYPE_SPS:
// Stash the SPS; it is sent together with the following PPS.
spslen = i_size-splitNum;
memcpy(spsData, data, spslen);
break;
case NALU_TYPE_PPS:
timeStamp = timeStamp - timeSpace;
[self pushSpsAndPpsQueue:(char *)spsData andppsData:(char *)data withPPSlength:spslen andPPSlength:(i_size-splitNum) andTimeStamp:timeStamp];
break;
case NALU_TYPE_IDR:
[self pushVideoNALU:(char *)data withLength:(i_size-splitNum) ifIDR:YES andTimeStamp:timeStamp];
break;
case NALU_TYPE_SLICE:
case NALU_TYPE_SEI:
[self pushVideoNALU:(char *)data withLength:(i_size-splitNum) ifIDR:NO andTimeStamp:timeStamp];
break;
default:
break;
}
}
}
}the question is :
I used Instruments and found that the memory for the data I capture keeps increasing, but NSLog
shows that the interval between creating the array and releasing it did not increase, and when I release the array its retain count is 1. The retain count of each object it contains is also one.
But when I don't encode, the memory doesn't increase... I am confused... please help.
the image pixel is 640x480.intruments leaks picture:
-
php ming flash slideshow to mp4/avi
19 août 2013, par Stefan — After hours of searching and trying I finally got a nice script together that generates a good-looking Flash .swf file with a nice transition between the images.
It works great if you access the swf file directly in a browser, depending on the amount of images the flash created takes anywhere between 10 and 60 seconds.
But when uploading to Youtube the movie created flashed by in one second.
Because SWF isn't really an accepted file format for YouTube, we decided to convert the Flash file to MP4 or AVI using ffmpeg.
Unfortunately that didn't work; it had the same effect as the YouTube movie.
We had an old version of ffmpeg, updated it to a recent version, and tried to convert again — with the same result.
The main thing I see is that ffmpeg can't read the SWF file's duration and bitrate — they are both 'N/A' — even though we do set them in the PHP script. Now I have to admit I haven't really tested with the new version yet because the command-line options are a little different, but I'll work on that after I post this.
In the previous version we tried setting the framerate of the source SWF file, but that didn't work either. Does anyone here have an idea? It would be greatly appreciated.
PHP Ming Script :
// Ming slideshow generator (question code): builds one SWF per object, showing each
// image for $breakTime seconds with a fade-out transition into the next image.
// NOTE(review): this fragment was scraped from a web page — the closing brace on the
// last line is fused with following prose and the brace count suggests an enclosing
// function was lost. Treat as an excerpt, not runnable code.
$fps = 30;
foreach($objects as $objectId => $images){
// START FLASH MOVIE
$m = new SWFMovie();
$m->setDimension($width, $height);
$m->setBackground(0, 0, 0);
$m->setRate($fps);
$m->setFrames(count($images)*202); //count(images)* 2 breaks *($fps*$breakTime)+22(fadeOut))
$i = 0;
foreach($images as $image){
// REMOVE THE BACKGROUND IMAGE
if($behind){
$m->remove($behind);
}
// # REMOVE
// LOAD NEW IMAGE
$img = new SWFBitmap(fopen($image,"rb"));
$pic = $m->add($img);
$pic->setdepth(3);
// # LOAD
// BREAK TIME — hold the still for $breakTime seconds worth of frames.
for($j=1;$j<=($fps*$breakTime);$j++){
$m->nextFrame();
}
$m->remove($pic);
// # BREAK
// LOAD THE NEXT IMAGE AS BACKGROUND, IF LAST IMAGE, LOAD FIRST
$nextBackgrondImage =($images[$i+1]) ? $images[$i+1] : $images[0] ;
$img = new SWFBitmap(fopen($nextBackgrondImage,"rb"));
$behind = $m->add($img);
$behind->setdepth(2);
// # LOAD
// AND FADE OUT AGAIN — fadeOut() is defined elsewhere; returns a fading sprite.
$img = fadeOut($image, $width, $height);
$pic = $m->add($img);
$pic->setdepth(3);
// # FADE OUT
// BREAK TIME
for($j=1;$j<=($fps*$breakTime);$j++){
$m->nextFrame();
}
$m->remove($pic);
# BREAK
$i++;
}
$m->save('./flash/'.$nvmId.'_'.$objectId.'.swf');
unset($m);
}
}FFMPEG version :
root@server:~# ffmpeg -version
\FFmpeg version SVN-r26402, Copyright (c) 2000-2011 the FFmpeg developers
built on Aug 15 2013 20:43:21 with gcc 4.4.5
configuration: --enable-libmp3lame --enable-libtheora --enable-libx264
--enable-libgsm --enable-postproc --enable-libxvid --enable-libfaac --enable-pthreads
--enable-libvorbis --enable-gpl --enable-x11grab --enable-nonfree
libavutil 50.36. 0 / 50.36. 0
libavcore 0.16. 1 / 0.16. 1
libavcodec 52.108. 0 / 52.108. 0
libavformat 52.93. 0 / 52.93. 0
libavdevice 52. 2. 3 / 52. 2. 3
libavfilter 1.74. 0 / 1.74. 0
libswscale 0.12. 0 / 0.12. 0
libpostproc 51. 2. 0 / 51. 2. 0
FFmpeg SVN-r26402
libavutil 50.36. 0 / 50.36. 0
libavcore 0.16. 1 / 0.16. 1
libavcodec 52.108. 0 / 52.108. 0
libavformat 52.93. 0 / 52.93. 0
libavdevice 52. 2. 3 / 52. 2. 3
libavfilter 1.74. 0 / 1.74. 0
libswscale 0.12. 0 / 0.12. 0
libpostproc 51. 2. 0 / 51. 2. 0FFMPEG command
root@server:~# ffmpeg -r 30 -i '/pathTo/public_html/flash/73003_8962011.swf' -vcodec libx264 /pathTo/public_html/flash/out.mp4
[swf @ 0x16c2510] Estimating duration from bitrate, this may be inaccurate
Input #0, swf, from '/pathTo/public_html/flash/73003_8962011.swf':
Duration: N/A, bitrate: N/A
Stream #0.0: Video: mjpeg, yuvj420p, 360x480, 30 fps, 30 tbr, 30 tbn, 30 tbc
[buffer @ 0x16d5850] w:360 h:480 pixfmt:yuvj420p
[libx264 @ 0x16d4d80] broken ffmpeg default settings detected
[libx264 @ 0x16d4d80] use an encoding preset (e.g. -vpre medium)
[libx264 @ 0x16d4d80] preset usage: -vpre <speed> -vpre <profile>
[libx264 @ 0x16d4d80] speed presets are listed in x264 --help
[libx264 @ 0x16d4d80] profile is optional; x264 defaults to high
Output #0, mp4, to '/pathTo/public_html/out.mp4':
Stream #0.0: Video: libx264, yuvj420p, 360x480, q=2-31, 200 kb/s, 90k tbn, 30 tbc
Stream mapping:
Stream #0.0 -> #0.0
Error while opening encoder for output stream #0.0 - maybe incorrect parameters such as bit_rate, rate, width or height
</profile></speed> -
Problems with Streaming a Multicast RTSP Stream with Live555
16 juin 2014, par ALM865I am having trouble setting up a Multicast RTSP session using Live555. The examples included with Live555 are mostly irrelevant as they deal with reading in files and my code differs because it reads in encoded frames generated from a FFMPEG thread within my own program (no pipes, no saving to disk, it is genuinely passing pointers to memory that contain the encoded frames for Live555 to stream).
My Live555 project that uses a custom Server Media Subsession so that I can receive data from an FFMPEG thread within my program (instead of Live555’s default reading from a file, yuk !). This is a requirement of my program as it reads in a GigEVision stream in one thread, sends the decoded raw RGB packets to the FFMPEG thread, which then in turn sends the encoded frames off to Live555 for RTSP streaming.
For the life of me I can’t work out how to send the RTSP stream as multicast instead of unicast !
Just a note, my program works perfectly at the moment streaming Unicast, so there is nothing wrong with my Live555 implementation (before you go crazy picking out irrelevant errors !). I just need to know how to modify my existing code to stream Multicast instead of Unicast.
My program is way too big to upload and share so I’m just going to share the important bits :
Live_AnalysingServerMediaSubsession.h
#ifndef _ANALYSING_SERVER_MEDIA_SUBSESSION_HH
#define _ANALYSING_SERVER_MEDIA_SUBSESSION_HH
#include
#include "Live_AnalyserInput.h"
// Custom Live555 subsession (question code): serves the MPEG-1/2 elementary stream
// produced by an in-process FFMPEG encoder thread (reached through AnalyserInput)
// instead of a file. Deriving from OnDemandServerMediaSubsession makes this a
// *unicast* subsession — which is the root of the asker's multicast question;
// Live555 multicast streaming uses PassiveServerMediaSubsession instead.
class AnalysingServerMediaSubsession: public OnDemandServerMediaSubsession {
public:
static AnalysingServerMediaSubsession*
createNew(UsageEnvironment& env, AnalyserInput& analyserInput, unsigned estimatedBitrate,
Boolean iFramesOnly = False,
double vshPeriod = 5.0
/* how often (in seconds) to inject a Video_Sequence_Header,
if one doesn't already appear in the stream */);
protected: // we're a virtual base class
AnalysingServerMediaSubsession(UsageEnvironment& env, AnalyserInput& AnalyserInput, unsigned estimatedBitrate, Boolean iFramesOnly, double vshPeriod);
virtual ~AnalysingServerMediaSubsession();
protected:
AnalyserInput& fAnalyserInput;  // source of encoded frames from the FFMPEG thread
unsigned fEstimatedKbps;        // advertised bitrate, kbps (derived from createNew arg)
private:
Boolean fIFramesOnly;
double fVSHPeriod;
// redefined virtual functions
virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
};
#endifAnd "Live_AnalysingServerMediaSubsession.cpp"
#include "Live_AnalysingServerMediaSubsession.h"
#include
#include
#include
// Factory: allocates the subsession on the heap (Live555 reclaims it when the
// enclosing ServerMediaSession is closed).
AnalysingServerMediaSubsession* AnalysingServerMediaSubsession::createNew(UsageEnvironment& env, AnalyserInput& wisInput, unsigned estimatedBitrate,
Boolean iFramesOnly,
double vshPeriod) {
return new AnalysingServerMediaSubsession(env, wisInput, estimatedBitrate,
iFramesOnly, vshPeriod);
}
// Constructor: reuseFirstSource=True so all clients share one encoder stream;
// converts the bit/s estimate to kbps (rounded) for the SDP.
AnalysingServerMediaSubsession
::AnalysingServerMediaSubsession(UsageEnvironment& env, AnalyserInput& analyserInput, unsigned estimatedBitrate, Boolean iFramesOnly, double vshPeriod)
: OnDemandServerMediaSubsession(env, True /*reuse the first source*/),
fAnalyserInput(analyserInput), fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) {
fEstimatedKbps = (estimatedBitrate + 500)/1000;
}
AnalysingServerMediaSubsession
::~AnalysingServerMediaSubsession() {
}
// Wraps the encoder-backed FramedSource in a discrete framer for the
// MPEG-1/2 video elementary stream.
FramedSource* AnalysingServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
estBitrate = fEstimatedKbps;
// Create a framer for the Video Elementary Stream:
//LOG_MSG("Create Net Stream Source [%d]", estBitrate);
return MPEG1or2VideoStreamDiscreteFramer::createNew(envir(), fAnalyserInput.videoSource());
}
// Builds the RTP sink for a client session. The commented-out section is the
// asker's abandoned attempt at forcing a multicast destination onto the unicast
// groupsock — not the supported way to do multicast in Live555.
RTPSink* AnalysingServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char /*rtpPayloadTypeIfDynamic*/, FramedSource* /*inputSource*/) {
setVideoRTPSinkBufferSize();
/*
struct in_addr destinationAddress;
destinationAddress.s_addr = inet_addr("239.255.12.42");
rtpGroupsock->addDestination(destinationAddress,8888);
rtpGroupsock->multicastSendOnly();
*/
return MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock);
}Live_AnalyserSouce.h
#ifndef _ANALYSER_SOURCE_HH
#define _ANALYSER_SOURCE_HH
#ifndef _FRAMED_SOURCE_HH
#include "FramedSource.hh"
#endif
class FFMPEG;
// The following class can be used to define specific encoder parameters
// Simple parameter holder naming the FFMPEG encoder instance a source reads from.
class AnalyserParameters {
public:
FFMPEG * Encoding_Source;
};
// Live555 FramedSource fed by the in-process FFMPEG encoder thread. Instances are
// reference-counted via the static `referenceCount`; new frames are signalled from
// the encoder thread through a Live555 event trigger (deliverFrame0).
class AnalyserSource: public FramedSource {
public:
static AnalyserSource* createNew(UsageEnvironment& env, FFMPEG * E_Source);
static unsigned GetRefCount();
public:
static EventTriggerId eventTriggerId;
protected:
AnalyserSource(UsageEnvironment& env, FFMPEG * E_Source);
// called only by createNew(), or by subclass constructors
virtual ~AnalyserSource();
private:
// redefined virtual functions:
virtual void doGetNextFrame();
private:
static void deliverFrame0(void* clientData);
void deliverFrame();
private:
static unsigned referenceCount; // used to count how many instances of this class currently exist
FFMPEG * Encoding_Source;       // encoder thread supplying the frames
unsigned int Last_Sent_Frame_ID; // last frame delivered, to avoid re-sending
};
#endifLive_AnalyserSource.cpp
#include "Live_AnalyserSource.h"
#include // for "gettimeofday()"
#include "FFMPEGClass.h"
// Factory for the encoder-backed source.
AnalyserSource* AnalyserSource::createNew(UsageEnvironment& env, FFMPEG * E_Source) {
return new AnalyserSource(env, E_Source);
}
// Static state shared by all instances: the (single) event-trigger id and the
// live-instance count.
EventTriggerId AnalyserSource::eventTriggerId = 0;
unsigned AnalyserSource::referenceCount = 0;
// Constructor: registers this source with the encoder thread so it gets notified
// of new frames, and lazily creates the shared event trigger used to re-enter the
// Live555 event loop from that thread.
AnalyserSource::AnalyserSource(UsageEnvironment& env, FFMPEG * E_Source) : FramedSource(env), Encoding_Source(E_Source) {
if (referenceCount == 0) {
// Any global initialization of the device would be done here:
}
++referenceCount;
// Any instance-specific initialization of the device would be done here:
Last_Sent_Frame_ID = 0;
/* register us with the Encoding thread so we'll get notices when new frame data turns up.. */
Encoding_Source->RegisterRTSP_Source(&(env.taskScheduler()), this);
// We arrange here for our "deliverFrame" member function to be called
// whenever the next frame of data becomes available from the device.
//
// If the device can be accessed as a readable socket, then one easy way to do this is using a call to
// envir().taskScheduler().turnOnBackgroundReadHandling( ... )
// (See examples of this call in the "liveMedia" directory.)
//
// If, however, the device *cannot* be accessed as a readable socket, then instead we can implement is using 'event triggers':
// Create an 'event trigger' for this device (if it hasn't already been done):
if (eventTriggerId == 0) {
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
}
// Destructor: un-registers from the encoder thread; the last instance also
// reclaims the shared event trigger.
AnalyserSource::~AnalyserSource() {
// Any instance-specific 'destruction' (i.e., resetting) of the device would be done here:
/* de-register this source from the Encoding thread, since we no longer need notices.. */
Encoding_Source->Un_RegisterRTSP_Source(this);
--referenceCount;
if (referenceCount == 0) {
// Any global 'destruction' (i.e., resetting) of the device would be done here:
// Reclaim our 'event trigger'
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
}
// Number of AnalyserSource instances currently alive.
unsigned AnalyserSource::GetRefCount() {
return referenceCount;
}
// Called by the downstream framer when it wants the next frame. Closes the stream
// when the encoder reports no frames (FrameID == 0); delivers immediately if a
// frame newer than the last-sent one is available; otherwise waits for the
// encoder thread to fire the event trigger.
void AnalyserSource::doGetNextFrame() {
// This function is called (by our 'downstream' object) when it asks for new data.
//LOG_MSG("Do Next Frame..");
// Note: If, for some reason, the source device stops being readable (e.g., it gets closed), then you do the following:
//if (0 /* the source stops being readable */ /*%%% TO BE WRITTEN %%%*/) {
unsigned int FrameID = Encoding_Source->GetFrameID();
if (FrameID == 0){
//LOG_MSG("No Data. Close");
handleClosure(this);
return;
}
// If a new frame of data is immediately available to be delivered, then do this now:
if (Last_Sent_Frame_ID != FrameID){
deliverFrame();
//DEBUG_MSG("Frame ID: %d",FrameID);
}
// No new data is immediately available to be delivered. We don't do anything more here.
// Instead, our event trigger must be called (e.g., from a separate thread) when new data becomes available.
}
// Event-trigger trampoline: clientData is the AnalyserSource registered with the trigger.
void AnalyserSource::deliverFrame0(void* clientData) {
((AnalyserSource*)clientData)->deliverFrame();
}
// Copies the next encoded frame from the FFMPEG thread into Live555's output
// buffer (fTo), truncating if it exceeds fMaxSize, then signals completion via
// FramedSource::afterGetting(). On any failure the stream is closed.
// NOTE(review): afterGetting() is also reached on the closure paths below —
// whether that is intended is not visible from this excerpt; verify.
void AnalyserSource::deliverFrame() {
if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
static u_int8_t* newFrameDataStart;
static unsigned newFrameSize = 0;
/* get the data frame from the Encoding thread.. */
if (Encoding_Source->GetFrame(&newFrameDataStart, &newFrameSize, &Last_Sent_Frame_ID)){
if (newFrameDataStart!=NULL) {
/* This should never happen, but check anyway.. */
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
fFrameSize = newFrameSize;
}
gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
// If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
/* move the data to be sent off.. */
memmove(fTo, newFrameDataStart, fFrameSize);
/* release the Mutex we had on the Frame's buffer.. */
Encoding_Source->ReleaseFrame();
}
else {
//AM Added, something bad happened
//ALTRACE("LIVE555: FRAME NULL\n");
fFrameSize=0;
fTo=NULL;
handleClosure(this);
}
}
else {
//LOG_MSG("Closing Connection due to Frame Error..");
handleClosure(this);
}
// After delivering the data, inform the reader that it is now available:
FramedSource::afterGetting(this);
}Live_AnalyserInput.cpp
#include "Live_AnalyserInput.h"
#include "Live_AnalyserSource.h"
////////// WISInput implementation //////////
// Factory: one-time global initialization gate, then a heap-allocated input device
// wrapping the given FFMPEG encoder.
AnalyserInput* AnalyserInput::createNew(UsageEnvironment& env, FFMPEG *Encoder) {
if (!fHaveInitialized) {
//if (!initialize(env)) return NULL;
fHaveInitialized = True;
}
return new AnalyserInput(env, Encoder);
}
// Returns the (static, shared) encoder-backed source, recreating it if it was
// never built or all previous instances have been destroyed.
FramedSource* AnalyserInput::videoSource() {
if (fOurVideoSource == NULL || AnalyserSource::GetRefCount() == 0) {
fOurVideoSource = AnalyserSource::createNew(envir(), m_Encoder);
}
return fOurVideoSource;
}
AnalyserInput::AnalyserInput(UsageEnvironment& env, FFMPEG *Encoder): Medium(env), m_Encoder(Encoder) {
}
// Destructor: closes the shared source if one is still alive.
AnalyserInput::~AnalyserInput() {
/* When we get destroyed, make sure our source is also destroyed.. */
if (fOurVideoSource != NULL && AnalyserSource::GetRefCount() != 0) {
AnalyserSource::handleClosure(fOurVideoSource);
}
}
// Static member definitions (shared across all AnalyserInput instances).
Boolean AnalyserInput::fHaveInitialized = False;
int AnalyserInput::fOurVideoFileNo = -1;
FramedSource* AnalyserInput::fOurVideoSource = NULL;Live_AnalyserInput.h
#ifndef _ANALYSER_INPUT_HH
#define _ANALYSER_INPUT_HH
#include
#include "FFMPEGClass.h"
// Live555 "input device" Medium that adapts the in-process FFMPEG encoder into a
// FramedSource for the subsession. Holds the shared source in static members.
class AnalyserInput: public Medium {
public:
static AnalyserInput* createNew(UsageEnvironment& env, FFMPEG *Encoder);
FramedSource* videoSource();
private:
AnalyserInput(UsageEnvironment& env, FFMPEG *Encoder); // called only by createNew()
virtual ~AnalyserInput();
private:
friend class WISVideoOpenFileSource;
static Boolean fHaveInitialized;      // one-time init gate for createNew()
static int fOurVideoFileNo;
static FramedSource* fOurVideoSource; // shared encoder-backed source
FFMPEG *m_Encoder;
};
// Functions to set the optimal buffer size for RTP sink objects.
// These should be called before each RTPSink is created.
#define VIDEO_MAX_FRAME_SIZE 300000
inline void setVideoRTPSinkBufferSize() { OutPacketBuffer::maxSize = VIDEO_MAX_FRAME_SIZE; }
#endifAnd finally the relevant code from my Live555 worker thread that starts the whole process :
// Worker-thread body fragment (question code; the enclosing function signature is
// not part of this excerpt). Builds a Live555 RTSP server on port 554, attaches one
// session backed by the FFMPEG encoder, and runs the event loop until Stop_RTSP_Loop
// or mStop is set. NOTE(review): this whole pipeline is unicast; a multicast setup
// would instead use PassiveServerMediaSubsession with a multicast Groupsock — confirm
// against the Live555 testMPEG1or2VideoStreamer example.
Stop_RTSP_Loop=0;
// MediaSession *ms;
TaskScheduler *scheduler;
UsageEnvironment *env ;
// RTSPClient *rtsp;
// MediaSubsession *Video_Sub;
char RTSP_Address[1024];
RTSP_Address[0]=0x00;
// No encoder registered — nothing to stream.
if (m_Encoder == NULL){
//DEBUG_MSG("No Video Encoder registered for the RTSP Encoder");
return 0;
}
scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
// To implement client access control to the RTSP server, do the following:
if (m_Enable_Pass){
authDB = new UserAuthenticationDatabase;
authDB->addUserRecord(UserN, PassW);
}
////////// authDB = new UserAuthenticationDatabase;
////////// authDB->addUserRecord((char*)"Admin", (char*)"Admin"); // replace these with real strings
// Repeat the above with each <username>, <password> that you wish to allow
// access to the server.
#endif
// Create the RTSP server:
RTSPServer* rtspServer = RTSPServer::createNew(*env, 554, authDB);
ServerMediaSession* sms;
AnalyserInput* inputDevice;
if (rtspServer == NULL) {
TRACE("LIVE555: Failed to create RTSP server: %s\n", env->getResultMsg());
return 0;
}
else {
char const* descriptionString = "Session streamed by \"IMC Server\"";
// Initialize the WIS input device:
inputDevice = AnalyserInput::createNew(*env, m_Encoder);
if (inputDevice == NULL) {
TRACE("Live555: Failed to create WIS input device\n");
return 0;
}
else {
// A MPEG-1 or 2 video elementary stream:
/* Increase the buffer size so we can handle the high res stream.. */
OutPacketBuffer::maxSize = 300000;
// NOTE: This *must* be a Video Elementary Stream; not a Program Stream
sms = ServerMediaSession::createNew(*env, RTSP_Address, RTSP_Address, descriptionString);
//sms->addSubsession(MPEG1or2VideoFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource, iFramesOnly));
sms->addSubsession(AnalysingServerMediaSubsession::createNew(*env, *inputDevice, m_Encoder->Get_Bitrate()));
//sms->addSubsession(WISMPEG1or2VideoServerMediaSubsession::createNew(sms->envir(), inputDevice, videoBitrate));
rtspServer->addServerMediaSession(sms);
//announceStream(rtspServer, sms, streamName, inputFileName);
//LOG_MSG("Play this stream using the URL %s", rtspServer->rtspURL(sms));
}
}
Stop_RTSP_Loop=0;
// Event loop: doEventLoop() returns when Stop_RTSP_Loop becomes non-zero.
for (;;)
{
/* The actual work is all carried out inside the LIVE555 Task scheduler */
env->taskScheduler().doEventLoop(&Stop_RTSP_Loop); // does not return
if (mStop) {
break;
}
}
Medium::close(rtspServer); // will also reclaim "sms" and its "ServerMediaSubsession"s
Medium::close(inputDevice);
</password></username>