
Other articles (112)
-
Customising the site by adding a logo, banner, or background image
5 September 2013 — Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.
-
Writing a news item
21 June 2013 — Present changes to your MédiaSPIP, or news about your projects on your MédiaSPIP, using the news section.
In MédiaSPIP's default theme, spipeo, news items are displayed at the bottom of the main page, below the editorials.
You can customise the news-creation form.
News-creation form: for a document of type news, the default fields are: publication date (customise the publication date) (...)
-
Publishing on MédiaSpip
13 June 2013 — Can I post content from an iPad tablet?
Yes, if your Médiaspip installation is at version 0.2 or later. If needed, contact your MédiaSpip administrator to find out.
On other sites (6089)
-
ffmpeg AudioSegment error when receiving audio chunks in a Socket.IO server in Python
26 January 2024, by a_crszkvc30Last_NameCol — I want to send each audio chunk every minute.
This is my test code: I want to save both the full audio file and the per-chunk audio files, and then combine the two audio files. The stop button works correctly, but the timed chunk sending does not work on the Python server.
Here is the Python server code, using Socket.IO:


import os
from io import BytesIO

from pydub import AudioSegment


def handle_voice(sid, data):  # `data` arrives as a blob from the client
    # Load the audio data in memory using BytesIO
    audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")

    directory = "dddd"
    # directory = str(names_sid.get(sid))
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Save it as a WAV file
    file_path = os.path.join(directory, f'{sid}.wav')
    audio_segment.export(file_path, format='wav')
    print('audio file saved')
 



And here is the client code:

<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.5.2/socket.io.js"></script>

<script>
    var socket = io('http://127.0.0.1:5000');
    const record = document.getElementById("record")
    const stop = document.getElementById("stop")
    const soundClips = document.getElementById("sound-clips")
    const chkHearMic = document.getElementById("chk-hear-mic")

    const audioCtx = new (window.AudioContext || window.webkitAudioContext)() // define the audio context

    const analyser = audioCtx.createAnalyser()
    // const distortion = audioCtx.createWaveShaper()
    // const gainNode = audioCtx.createGain()
    // const biquadFilter = audioCtx.createBiquadFilter()

    function makeSound(stream) {
        const source = audioCtx.createMediaStreamSource(stream)
        socket.connect()
        source.connect(analyser)
        // analyser.connect(distortion)
        // distortion.connect(biquadFilter)
        // biquadFilter.connect(gainNode)
        // gainNode.connect(audioCtx.destination) // connecting the different audio graph nodes together
        analyser.connect(audioCtx.destination)
    }

    if (navigator.mediaDevices) {
        console.log('getUserMedia supported.')

        const constraints = { audio: true }
        let chunks = []

        navigator.mediaDevices.getUserMedia(constraints)
            .then(stream => {
                const mediaRecorder = new MediaRecorder(stream)

                chkHearMic.onchange = e => {
                    if (e.target.checked == true) {
                        audioCtx.resume()
                        makeSound(stream)
                    } else {
                        audioCtx.suspend()
                    }
                }

                record.onclick = () => {
                    mediaRecorder.start(1000)
                    console.log(mediaRecorder.state)
                    console.log("recorder started")
                    record.style.background = "red"
                    record.style.color = "black"
                }

                stop.onclick = () => {
                    mediaRecorder.stop()
                    console.log(mediaRecorder.state)
                    console.log("recorder stopped")
                    record.style.background = ""
                    record.style.color = ""
                }

                mediaRecorder.onstop = e => {
                    console.log("data available after MediaRecorder.stop() called.")
                    const bb = new Blob(chunks, { 'type': 'audio/wav' })
                    socket.emit('voice', bb)
                    const clipName = prompt("Enter a title for the audio clip.", new Date())

                    const clipContainer = document.createElement('article')
                    const clipLabel = document.createElement('p')
                    const audio = document.createElement('audio')
                    const deleteButton = document.createElement('button')

                    clipContainer.classList.add('clip')
                    audio.setAttribute('controls', '')
                    deleteButton.innerHTML = "Delete"
                    clipLabel.innerHTML = clipName

                    clipContainer.appendChild(audio)
                    clipContainer.appendChild(clipLabel)
                    clipContainer.appendChild(deleteButton)
                    soundClips.appendChild(clipContainer)

                    audio.controls = true
                    const blob = new Blob(chunks, { 'type': 'audio/ogg codecs=opus' })

                    chunks = []
                    const audioURL = URL.createObjectURL(blob)
                    audio.src = audioURL
                    console.log("recorder stopped")

                    deleteButton.onclick = e => {
                        evtTgt = e.target
                        evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode)
                    }
                }

                mediaRecorder.ondataavailable = function (e) {
                    chunks.push(e.data)
                    if (chunks.length >= 5) {
                        const bloddb = new Blob(chunks, { 'type': 'audio/wav' })
                        socket.emit('voice', bloddb)

                        chunks = []
                    }

                    mediaRecorder.sendData = function (buffer) {
                        const bloddb = new Blob(buffer, { 'type': 'audio/wav' })
                        socket.emit('voice', bloddb)
                    }
                };
            })
            .catch(err => {
                console.log('The following error occurred: ' + err)
            })
    }
</script>




Task exception was never retrieved
future: <Task finished coro=<InstrumentedAsyncServer._handle_event_internal()> exception=CouldntDecodeError('Decoding failed. ffmpeg returned error code: 3199971767')>
Traceback (most recent call last):
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_admin.py", line 276, in _handle_event_internal
 ret = await self.sio.__handle_event_internal(server, sid, eio_sid,
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 597, in _handle_event_internal
 r = await server._trigger_event(data[0], namespace, sid, *data[1:])
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 635, in _trigger_event
 ret = handler(*args)
 ^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\Python-Javascript-Websocket-Video-Streaming--main\poom2.py", line 153, in handle_voice
 audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\pydub\audio_segment.py", line 773, in from_file
 raise CouldntDecodeError(
pydub.exceptions.CouldntDecodeError: Decoding failed. ffmpeg returned error code: 3199971767

Output from ffmpeg/avlib:

ffmpeg version 6.1.1-full_build-www.gyan.dev Copyright (c) 2000-2023 the FFmpeg developers
 built with gcc 12.2.0 (Rev10, Built by MSYS2 project)
 configuration: --enable-gpl --enable-version3 --enable-static --pkg-config=pkgconf --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-ffnvcodec --enable-nvdec --enable-nvenc --enable-dxva2 --enable-d3d11va --enable-libvpl --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
 libavutil 58. 29.100 / 58. 29.100
 libavcodec 60. 31.102 / 60. 31.102
 libavformat 60. 16.100 / 60. 16.100
 libavdevice 60. 3.100 / 60. 3.100
 libavfilter 9. 12.100 / 9. 12.100
 libswscale 7. 5.100 / 7. 5.100
 libswresample 4. 12.100 / 4. 12.100
 libpostproc 57. 3.100 / 57. 3.100
[cache @ 000001d9828efe40] Inner protocol failed to seekback end : -40
[matroska,webm @ 000001d9828efa00] EBML header parsing failed
[cache @ 000001d9828efe40] Statistics, cache hits:0 cache misses:3
[in#0 @ 000001d9828da3c0] Error opening input: Invalid data found when processing input
Error opening input file cache:pipe:0.
Error opening input files: Invalid data found when processing input


I'm using ffmpeg 6.1.1-full_build.
I don't know why this error occurs: the stop button's event is sent and handled correctly, but the chunked data is not handled correctly by the Python server.
Sorry for my poor English.
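
A note on the error: "EBML header parsing failed" usually means ffmpeg was handed a WebM fragment without its container header. MediaRecorder only writes the EBML/WebM header into the very first chunk, so a blob built from later chunks alone cannot be decoded by itself. Below is a minimal client-side sketch of one commonly suggested workaround, keeping that first chunk and prepending it to every batch before emitting (it reuses the socket and chunks variables from the script above and has not been verified against this exact setup):

// Keep the first chunk (it carries the WebM/EBML header) and prepend it
// to every batch that is sent, so each emitted blob is decodable on its own.
let headerChunk = null

mediaRecorder.ondataavailable = function (e) {
    if (!headerChunk) headerChunk = e.data          // first chunk holds the header
    chunks.push(e.data)
    if (chunks.length >= 5) {
        const parts = chunks[0] === headerChunk ? chunks : [headerChunk, ...chunks]
        socket.emit('voice', new Blob(parts, { 'type': 'audio/webm' }))  // WebM, not WAV
        chunks = []
    }
}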


-
Transcode webcam blob to RTMP using ffmpeg.wasm
29 November 2023, by hassan moradnezhad — I'm trying to transcode webcam blob data and send it to an RTMP server from the browser using ffmpeg.wasm.

First, I create a MediaStream.

const stream = await navigator.mediaDevices.getUserMedia({
 video: true,
 });



Then, I create a MediaRecorder.


const recorder = new MediaRecorder(stream, {mimeType: "video/webm; codecs:vp9"});
 recorder.ondataavailable = handleDataAvailable;
 recorder.start(0)



When data is available, I call a function named handleDataAvailable.

Here is the function:

const handleDataAvailable = (event: BlobEvent) => {
 console.log("data-available");
 if (event.data.size > 0) {
 recordedChunksRef.current.push(event.data);
 transcode(event.data)
 }
 };



In the code above, I use another function called transcode. Its goal is to send the data to the RTMP server using ffmpeg.wasm.

Here it is:

const transcode = async (inputVideo: Blob | undefined) => {
 const ffmpeg = ffmpegRef.current;
 const fetchFileOutput = await fetchFile(inputVideo)
 ffmpeg?.writeFile('input.webm', fetchFileOutput)

 const data = await ffmpeg?.readFile('input.webm');
 if (videoRef.current) {
 videoRef.current.src =
 URL.createObjectURL(new Blob([(data as any)?.buffer], {type: 'video/webm'}));
 }

 // execute by node-media-server config 1
 await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c', 'copy', '-f', 'flv', "rtmp://localhost:1935/live/ttt"])

 // execute by node-media-server config 2
 // await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c:v', 'libx264', '-preset', 'veryfast', '-tune', 'zerolatency', '-c:a', 'aac', '-ar', '44100', '-f', 'flv', 'rtmp://localhost:1935/live/ttt']);

 // execute by stack-over-flow config 1
 // await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c:v', 'h264', '-c:a', 'aac', '-f', 'flv', "rtmp://localhost:1935/live/ttt"]);

 // execute by stack-over-flow config 2
 // await ffmpeg?.exec(['-i', 'input.webm', '-c:v', 'libx264', '-flags:v', '+global_header', '-c:a', 'aac', '-ac', '2', '-f', 'flv', "rtmp://localhost:1935/live/ttt"]);

 // execute by stack-over-flow config 3
 // await ffmpeg?.exec(['-i', 'input.webm', '-acodec', 'aac', '-ac', '2', '-strict', 'experimental', '-ab', '160k', '-vcodec', 'libx264', '-preset', 'slow', '-profile:v', 'baseline', '-level', '30', '-maxrate', '10000000', '-bufsize', '10000000', '-b', '1000k', '-f', 'flv', 'rtmp://localhost:1935/live/ttt']);

 }



After running the app and starting the stream, the console logs are as below.


ffmpeg >>> ffmpeg version 5.1.3 Copyright (c) 2000-2022 the FFmpeg developers index.tsx:81:20
ffmpeg >>> built with emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) 3.1.40 (5c27e79dd0a9c4e27ef2326841698cdd4f6b5784) index.tsx:81:20
ffmpeg >>> configuration: --target-os=none --arch=x86_32 --enable-cross-compile --disable-asm --disable-stripping --disable-programs --disable-doc --disable-debug --disable-runtime-cpudetect --disable-autodetect --nm=emnm --ar=emar --ranlib=emranlib --cc=emcc --cxx=em++ --objcc=emcc --dep-cc=emcc --extra-cflags='-I/opt/include -O3 -msimd128' --extra-cxxflags='-I/opt/include -O3 -msimd128' --disable-pthreads --disable-w32threads --disable-os2threads --enable-gpl --enable-libx264 --enable-libx265 --enable-libvpx --enable-libmp3lame --enable-libtheora --enable-libvorbis --enable-libopus --enable-zlib --enable-libwebp --enable-libfreetype --enable-libfribidi --enable-libass --enable-libzimg index.tsx:81:20
ffmpeg >>> libavutil 57. 28.100 / 57. 28.100 index.tsx:81:20
ffmpeg >>> libavcodec 59. 37.100 / 59. 37.100 index.tsx:81:20
ffmpeg >>> libavformat 59. 27.100 / 59. 27.100 index.tsx:81:20
ffmpeg >>> libavdevice 59. 7.100 / 59. 7.100 index.tsx:81:20
ffmpeg >>> libavfilter 8. 44.100 / 8. 44.100 index.tsx:81:20
ffmpeg >>> libswscale 6. 7.100 / 6. 7.100 index.tsx:81:20
ffmpeg >>> libswresample 4. 7.100 / 4. 7.100 index.tsx:81:20
ffmpeg >>> libpostproc 56. 6.100 / 56. 6.100 index.tsx:81:20
ffmpeg >>> Input #0, matroska,webm, from 'input.webm': index.tsx:81:20
ffmpeg >>> Metadata: index.tsx:81:20
ffmpeg >>> encoder : QTmuxingAppLibWebM-0.0.1 index.tsx:81:20
ffmpeg >>> Duration: N/A, start: 0.000000, bitrate: N/A index.tsx:81:20
ffmpeg >>> Stream #0:0(eng): Video: vp8, yuv420p(progressive), 640x480, SAR 1:1 DAR 4:3, 15.50 tbr, 1k tbn (default)



The problem occurs when ffmpeg.wasm tries to execute the last command:

await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c', 'copy', '-f', 'flv', "rtmp://localhost:1935/live/ttt"])

It just issues a GET request; I will give further details about this request below.

As you can see, I have tried lots of argument variations with ffmpeg?.exec, but none of them works.

The browser's network tab, after ffmpeg.wasm executes the command, looks like this:



It sends a GET request to ws://localhost:1935/, and nothing happens after that.

For the backend, I use node-media-server; here are its output logs when ffmpeg.wasm tries to execute the command:

11/28/2023 19:33:18 55301 [INFO] [rtmp disconnect] id=JL569YOF
[NodeEvent on doneConnect] id=JL569YOF args=undefined



Finally, here are my questions:

- How can I achieve this?
- Is it possible to stream a webcam to an RTMP server this way? (See the sketch below.)
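
For context, ffmpeg.wasm runs inside the browser sandbox, so an rtmp:// output URL can only be reached through the browser's networking, which would explain the lone request to ws://localhost:1935/ seen above. A commonly used alternative is to ship the recorded WebM chunks to a small server and let a native ffmpeg push them to RTMP. A minimal sketch under those assumptions (the ws package, an ffmpeg binary on PATH, and an illustrative port; none of this is from the original post):

// Accept WebM chunks over a WebSocket and pipe them into a native ffmpeg,
// which pushes the stream to the RTMP endpoint.
const { spawn } = require('child_process');
const { WebSocketServer } = require('ws');

const wss = new WebSocketServer({ port: 8081 });

wss.on('connection', (socket) => {
    const ffmpeg = spawn('ffmpeg', [
        '-i', 'pipe:0',                                   // read WebM from stdin
        '-c:v', 'libx264', '-preset', 'veryfast', '-tune', 'zerolatency',
        '-c:a', 'aac', '-ar', '44100',
        '-f', 'flv', 'rtmp://localhost:1935/live/ttt',
    ]);
    socket.on('message', (chunk) => ffmpeg.stdin.write(chunk));
    socket.on('close', () => ffmpeg.stdin.end());
});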








-
How do I play an HLS stream when the playlist.m3u8 file is constantly being updated?
3 January 2021, by Adnan Ahmed — I am using MediaRecorder to record chunks of my live video in WebM format from a MediaStream, converting these chunks to .ts files on the server using ffmpeg, and then updating my playlist.m3u8 file with this code:


function generateM3u8Playlist(fileDataArr, playlistFp, isLive, cb) {
 var durations = fileDataArr.map(function(fd) {
 return fd.duration;
 });
 var maxT = maxOfArr(durations);

 var meta = [
 '#EXTM3U',
 '#EXT-X-VERSION:3',
 '#EXT-X-MEDIA-SEQUENCE:0',
 '#EXT-X-ALLOW-CACHE:YES',
 '#EXT-X-TARGETDURATION:' + Math.ceil(maxT),
 ];

 fileDataArr.forEach(function(fd) {
 meta.push('#EXTINF:' + fd.duration.toFixed(2) + ',');
 meta.push(fd.fileName2);
 });

 if (!isLive) {
 meta.push('#EXT-X-ENDLIST');
 }

 meta.push('');
 meta = meta.join('\n');

 fs.writeFile(playlistFp, meta, cb);
}



Here, fileDataArr holds information for all the chunks that have been created.
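
To make the parameters concrete, a hypothetical call could look like this (segment names and durations are made up):

// Hypothetical usage of generateM3u8Playlist (values are illustrative):
var fileDataArr = [
    { fileName2: '00000.ts', duration: 8.0 },
    { fileName2: '00001.ts', duration: 8.0 }
];

generateM3u8Playlist(fileDataArr, 'videos/abc/playlist.m3u8', true, function(err) {
    // isLive = true, so no #EXT-X-ENDLIST is appended and players keep polling
    if (err) console.error(err);
});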

After that, I use this code to create an HLS server:


var runStreamServer = (function(streamFolder) {
 var executed = false;
 return function(streamFolder) {
 if (!executed) {
 executed = true;
 var HLSServer = require('hls-server')
 var http = require('http')

 var server = http.createServer()
 var hls = new HLSServer(server, {
 path: '/stream', // Base URI to output HLS streams
 dir: 'C:\\Users\\Work\\Desktop\\live-stream\\webcam2hls\\videos\\' + streamFolder // Directory that input files are stored
 })
 console.log("We are going to stream from folder:" + streamFolder);
 server.listen(8000);
 console.log('Server Listening on Port 8000');
 }
 };
})();



The problem is that if I stop creating new chunks and then open the HLS server link:

http://localhost:8000/stream/playlist.m3u8

the video plays in VLC, but if I try to play it during recording it keeps loading the file and never plays. I want it to play while new chunks are being created and playlist.m3u8 is being updated. The quirk in the generateM3u8Playlist function is that it adds '#EXT-X-ENDLIST' to the playlist file only after I have stopped recording.
The software is still in development, so the code is a bit messy. Thank you for any answers.
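
As background, a live HLS playlist is normally served without #EXT-X-ENDLIST and with an #EXT-X-MEDIA-SEQUENCE value that advances as old segments are dropped, which tells players to keep polling for updates. A sketch of how the generation step above could be adapted for that (same fileDataArr shape; purely illustrative):

// Same idea as generateM3u8Playlist above, but with an explicit media
// sequence so a sliding window of segments can be served while live.
function generateLiveM3u8(fileDataArr, playlistFp, mediaSequence, cb) {
    var durations = fileDataArr.map(function(fd) { return fd.duration; });
    var maxT = Math.max.apply(null, durations);

    var meta = [
        '#EXTM3U',
        '#EXT-X-VERSION:3',
        '#EXT-X-MEDIA-SEQUENCE:' + mediaSequence,  // must grow as old segments roll off
        '#EXT-X-TARGETDURATION:' + Math.ceil(maxT),
    ];

    fileDataArr.forEach(function(fd) {
        meta.push('#EXTINF:' + fd.duration.toFixed(2) + ',');
        meta.push(fd.fileName2);
    });

    meta.push('');  // no #EXT-X-ENDLIST while the stream is live

    require('fs').writeFile(playlistFp, meta.join('\n'), cb);
}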

The client-side code that generates the blobs is as follows:


var mediaConstraints = {
 video: true,
 audio:true
 };
navigator.getUserMedia(mediaConstraints, onMediaSuccess, onMediaError);
function onMediaSuccess(stream) {
 console.log('will start capturing and sending ' + (DT / 1000) + 's videos when you press start');
 var mediaRecorder = new MediaStreamRecorder(stream);

 mediaRecorder.mimeType = 'video/webm';

 mediaRecorder.ondataavailable = function(blob) {
 var count2 = zeroPad(count, 5);
 // here count2 just creates a blob number 
 console.log('sending chunk ' + name + ' #' + count2 + '...');
 send('/chunk/' + name + '/' + count2 + (stopped ? '/finish' : ''), blob);
 ++count;
 };
 }
// Here we have the send function which sends our blob to server:
 function send(url, blob) {
 var xhr = new XMLHttpRequest();
 xhr.open('POST', url, true);

 xhr.responseType = 'text/plain';
 xhr.setRequestHeader('Content-Type', 'video/webm');
 //xhr.setRequestHeader("Content-Length", blob.length);

 xhr.onload = function(e) {
 if (this.status === 200) {
 console.log(this.response);
 }
 };
 xhr.send(blob);
 }



The code that receives the XHR request is as follows:


var parts = u.split('/');
 var prefix = parts[2];
 var num = parts[3];
 var isFirst = false;
 var isLast = !!parts[4];

 if ((/^0+$/).test(num)) {
 var path = require('path');
 shell.mkdir(path.join(__dirname, 'videos', prefix));
 isFirst = true;
 }

 var fp = 'videos/' + prefix + '/' + num + '.webm';
 var msg = 'got ' + fp;
 console.log(msg);
 console.log('isFirst:%s, isLast:%s', isFirst, isLast);

 var stream = fs.createWriteStream(fp, { encoding: 'binary' });
 /*stream.on('end', function() {
 respond(res, ['text/plain', msg]);
 });*/

 //req.setEncoding('binary');

 req.pipe(stream);
 req.on('end', function() {
 respond(res, ['text/plain', msg]);

 if (!LIVE) { return; }

 var duration = 20;
 var fd = {
 fileName: num + '.webm',
 filePath: fp,
 duration: duration
 };
 var fileDataArr;
 if (isFirst) {
 fileDataArr = [];
 fileDataArrs[prefix] = fileDataArr;
 } else {
 var fileDataArr = fileDataArrs[prefix];
 }
 try {
 fileDataArr.push(fd);
 } catch (err) {
 fileDataArr = [];
 console.log(err.message);
 }
 videoUtils.computeStartTimes(fileDataArr);

 videoUtils.webm2Mpegts(fd, function(err, mpegtsFp) {
 if (err) { return console.error(err); }
 console.log('created %s', mpegtsFp);

 var playlistFp = 'videos/' + prefix + '/playlist.m3u8';

 var fileDataArr2 = (isLast ? fileDataArr : lastN(fileDataArr, PREV_ITEMS_IN_LIVE));

 var action = (isFirst ? 'created' : (isLast ? 'finished' : 'updated'));

 videoUtils.generateM3u8Playlist(fileDataArr2, playlistFp, !isLast, function(err) {
 console.log('playlist %s %s', playlistFp, (err ? err.toString() : action));
 });
 });


 runStreamServer(prefix);
 }
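
The videoUtils.webm2Mpegts helper is not shown in the question. For completeness, here is a minimal sketch of what such a conversion step might look like, assuming the ffmpeg CLI is installed (flags and the output path are illustrative):

// Convert one uploaded WebM chunk to an MPEG-TS segment for the HLS playlist.
var { execFile } = require('child_process');

function webm2Mpegts(fd, cb) {
    var outFp = fd.filePath.replace(/\.webm$/, '.ts');
    execFile('ffmpeg', [
        '-y', '-i', fd.filePath,
        '-c:v', 'libx264', '-c:a', 'aac',
        '-muxdelay', '0',
        '-f', 'mpegts', outFp,
    ], function(err) {
        cb(err, outFp);
    });
}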