
Recherche avancée
Autres articles (94)
-
Configuration spécifique d’Apache
4 février 2011, parModules spécifiques
Pour la configuration d’Apache, il est conseillé d’activer certains modules non spécifiques à MediaSPIP, mais permettant d’améliorer les performances : mod_deflate et mod_headers pour compresser automatiquement via Apache les pages. Cf ce tutoriel ; mode_expires pour gérer correctement l’expiration des hits. Cf ce tutoriel ;
Il est également conseillé d’ajouter la prise en charge par apache du mime-type pour les fichiers WebM comme indiqué dans ce tutoriel.
Création d’un (...) -
Submit bugs and patches
13 avril 2011 — Unfortunately, software is never perfect.
If you think you have found a bug, report it using our ticket system. Please help us to fix it by providing the following information : the browser you are using, including the exact version as precise an explanation as possible of the problem if possible, the steps taken resulting in the problem a link to the site / page in question
If you think you have solved the bug, fill in a ticket and attach to it a corrective patch.
You may also (...) -
Contribute to translation
13 avril 2011You can help us to improve the language used in the software interface to make MediaSPIP more accessible and user-friendly. You can also translate the interface into any language that allows it to spread to new linguistic communities.
To do this, we use the translation interface of SPIP where all the language modules of MediaSPIP are available. Just subscribe to the mailing list and request further information on translation.
MediaSPIP is currently available in French and English (...)
Sur d’autres sites (7210)
-
ffmpeg Audiosegment error in get audio chunks in socketIo server in python
26 janvier 2024, par a_crszkvc30Last_NameCol — I want to send each audio chunk every minute.
this is the test code and i want to save audiofile and audio chunk file.
then, i will combine two audio files stop button was worked correctly but with set time function is not worked in python server.
there is python server code with socketio


def handle_voice(sid, data):  # data arrives as a raw-bytes Blob from the client
    """Decode a WebM audio blob received over Socket.IO and save it as WAV.

    Parameters:
        sid:  Socket.IO session id; used to name the output file.
        data: WebM-encoded audio bytes.  Assumes a complete WebM stream
              beginning with an EBML header -- mid-stream MediaRecorder
              chunks lack that header and will fail to decode
              (pydub.exceptions.CouldntDecodeError).
    """
    # Load the audio entirely in memory via BytesIO (no temp file needed).
    audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")

    directory = "dddd"
    # directory = str(names_sid.get(sid))
    # Create the output directory if missing; exist_ok avoids a race between
    # the exists() check and makedirs() when several events arrive at once.
    os.makedirs(directory, exist_ok=True)

    # Export the decoded audio as a WAV file named after the session id.
    file_path = os.path.join(directory, f'{sid}.wav')
    audio_segment.export(file_path, format='wav')
    print('오디오 파일 저장 완료')
 



and there is client






 
 
 <code class="echappe-js"><script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.5.2/socket.io.js"></script>




 






<script>&#xA; var socket = io(&#x27;http://127.0.0.1:5000&#x27;);&#xA; const record = document.getElementById("record")&#xA; const stop = document.getElementById("stop")&#xA; const soundClips = document.getElementById("sound-clips")&#xA; const chkHearMic = document.getElementById("chk-hear-mic")&#xA;&#xA; const audioCtx = new(window.AudioContext || window.webkitAudioContext)() // 오디오 컨텍스트 정의&#xA;&#xA; const analyser = audioCtx.createAnalyser()&#xA; // const distortion = audioCtx.createWaveShaper()&#xA; // const gainNode = audioCtx.createGain()&#xA; // const biquadFilter = audioCtx.createBiquadFilter()&#xA;&#xA; function makeSound(stream) {&#xA; const source = audioCtx.createMediaStreamSource(stream)&#xA; socket.connect()&#xA; source.connect(analyser)&#xA; // analyser.connect(distortion)&#xA; // distortion.connect(biquadFilter)&#xA; // biquadFilter.connect(gainNode)&#xA; // gainNode.connect(audioCtx.destination) // connecting the different audio graph nodes together&#xA; analyser.connect(audioCtx.destination)&#xA;&#xA; }&#xA;&#xA; if (navigator.mediaDevices) {&#xA; console.log(&#x27;getUserMedia supported.&#x27;)&#xA;&#xA; const constraints = {&#xA; audio: true&#xA; }&#xA; let chunks = []&#xA;&#xA; navigator.mediaDevices.getUserMedia(constraints)&#xA; .then(stream => {&#xA;&#xA; const mediaRecorder = new MediaRecorder(stream)&#xA; &#xA; chkHearMic.onchange = e => {&#xA; if(e.target.checked == true) {&#xA; audioCtx.resume()&#xA; makeSound(stream)&#xA; } else {&#xA; audioCtx.suspend()&#xA; }&#xA; }&#xA; &#xA; record.onclick = () => {&#xA; mediaRecorder.start(1000)&#xA; console.log(mediaRecorder.state)&#xA; console.log("recorder started")&#xA; record.style.background = "red"&#xA; record.style.color = "black"&#xA; }&#xA;&#xA; stop.onclick = () => {&#xA; mediaRecorder.stop()&#xA; console.log(mediaRecorder.state)&#xA; console.log("recorder stopped")&#xA; record.style.background = ""&#xA; record.style.color = ""&#xA; }&#xA;&#xA; mediaRecorder.onstop = e => {&#xA; 
console.log("data available after MediaRecorder.stop() called.")&#xA; const bb = new Blob(chunks, { &#x27;type&#x27; : &#x27;audio/wav&#x27; })&#xA; socket.emit(&#x27;voice&#x27;,bb)&#xA; const clipName = prompt("오디오 파일 제목을 입력하세요.", new Date())&#xA;&#xA; const clipContainer = document.createElement(&#x27;article&#x27;)&#xA; const clipLabel = document.createElement(&#x27;p&#x27;)&#xA; const audio = document.createElement(&#x27;audio&#x27;)&#xA; const deleteButton = document.createElement(&#x27;button&#x27;)&#xA;&#xA; clipContainer.classList.add(&#x27;clip&#x27;)&#xA; audio.setAttribute(&#x27;controls&#x27;, &#x27;&#x27;)&#xA; deleteButton.innerHTML = "삭제"&#xA; clipLabel.innerHTML = clipName&#xA;&#xA; clipContainer.appendChild(audio)&#xA; clipContainer.appendChild(clipLabel)&#xA; clipContainer.appendChild(deleteButton)&#xA; soundClips.appendChild(clipContainer)&#xA;&#xA; audio.controls = true&#xA; const blob = new Blob(chunks, {&#xA; &#x27;type&#x27;: &#x27;audio/ogg codecs=opus&#x27;&#xA; })&#xA;&#xA; chunks = []&#xA; const audioURL = URL.createObjectURL(blob)&#xA; audio.src = audioURL&#xA; console.log("recorder stopped")&#xA;&#xA; deleteButton.onclick = e => {&#xA; evtTgt = e.target&#xA; evtTgt .parentNode.parentNode.removeChild(evtTgt.parentNode)&#xA; }&#xA; }&#xA;&#xA; mediaRecorder.ondataavailable = function(e) {&#xA; chunks.push(e.data)&#xA; if (chunks.length >= 5)&#xA; {&#xA; const bloddb = new Blob(chunks, { &#x27;type&#x27; : &#x27;audio/wav&#x27; })&#xA; socket.emit(&#x27;voice&#x27;, bloddb)&#xA; &#xA; chunks = []&#xA; }&#xA; mediaRecorder.sendData = function(buffer) {&#xA; const bloddb = new Blob(buffer, { &#x27;type&#x27; : &#x27;audio/wav&#x27; })&#xA; socket.emit(&#x27;voice&#x27;, bloddb)&#xA;}&#xA;};&#xA; })&#xA; .catch(err => {&#xA; console.log(&#x27;The following error occurred: &#x27; &#x2B; err)&#xA; })&#xA; }&#xA; </script>




Task exception was never retrieved
future: <task finished="finished" coro="<InstrumentedAsyncServer._handle_event_internal()" defined="defined" at="at"> exception=CouldntDecodeError('Decoding failed. ffmpeg returned error code: 3199971767\n\nOutput from ffmpeg/avlib:\n\nffmpeg version 6.1.1-full_build-www.gyan.dev Copyright (c) 2000-2023 the FFmpeg developers\r\n built with gcc 12.2.0 (Rev10, Built by MSYS2 project)\r\n configuration: --enable-gpl --enable-version3 --enable-static --pkg-config=pkgconf --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-ffnvcodec --enable-nvdec --enable-nvenc --enable-dxva2 --enable-d3d11va --enable-libvpl --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint\r\n libavutil 58. 
29.100 / 58. 29.100\r\n libavcodec 60. 31.102 / 60. 31.102\r\n libavformat 60. 16.100 / 60. 16.100\r\n libavdevice 60. 3.100 / 60. 3.100\r\n libavfilter 9. 12.100 / 9. 12.100\r\n libswscale 7. 5.100 / 7. 5.100\r\n libswresample 4. 12.100 / 4. 12.100\r\n libpostproc 57. 3.100 / 57. 3.100\r\n[cache @ 000001d9828efe40] Inner protocol failed to seekback end : -40\r\n[matroska,webm @ 000001d9828efa00] EBML header parsing failed\r\n[cache @ 000001d9828efe40] Statistics, cache hits:0 cache misses:3\r\n[in#0 @ 000001d9828da3c0] Error opening input: Invalid data found when processing input\r\nError opening input file cache:pipe:0.\r\nError opening input files: Invalid data found when processing input\r\n')>
Traceback (most recent call last):
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_admin.py", line 276, in _handle_event_internal
 ret = await self.sio.__handle_event_internal(server, sid, eio_sid,
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 597, in _handle_event_internal
 r = await server._trigger_event(data[0], namespace, sid, *data[1:])
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 635, in _trigger_event
 ret = handler(*args)
 ^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\Python-Javascript-Websocket-Video-Streaming--main\poom2.py", line 153, in handle_voice
 audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\pydub\audio_segment.py", line 773, in from_file
 raise CouldntDecodeError(
pydub.exceptions.CouldntDecodeError: Decoding failed. ffmpeg returned error code: 3199971767

Output from ffmpeg/avlib:

ffmpeg version 6.1.1-full_build-www.gyan.dev Copyright (c) 2000-2023 the FFmpeg developers
 built with gcc 12.2.0 (Rev10, Built by MSYS2 project)
 configuration: --enable-gpl --enable-version3 --enable-static --pkg-config=pkgconf --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-ffnvcodec --enable-nvdec --enable-nvenc --enable-dxva2 --enable-d3d11va --enable-libvpl --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
 libavutil 58. 29.100 / 58. 29.100
 libavcodec 60. 31.102 / 60. 31.102
 libavformat 60. 16.100 / 60. 16.100
 libavdevice 60. 3.100 / 60. 3.100
 libavfilter 9. 12.100 / 9. 12.100
 libswscale 7. 5.100 / 7. 5.100
 libswresample 4. 12.100 / 4. 12.100
 libpostproc 57. 3.100 / 57. 3.100
[cache @ 000001d9828efe40] Inner protocol failed to seekback end : -40
[matroska,webm @ 000001d9828efa00] EBML header parsing failed
[cache @ 000001d9828efe40] Statistics, cache hits:0 cache misses:3
[in#0 @ 000001d9828da3c0] Error opening input: Invalid data found when processing input
Error opening input file cache:pipe:0.
Error opening input files: Invalid data found when processing input
</task>


im using version of ffmpeg-6.1.1-full_build.
i dont know this error exist the stop button sent event correctly. but chunk data was not work correctly in python server.
my english was so bad. sry


-
Transcode webcam blob to RTMP using ffmpeg.wasm
29 novembre 2023, par hassan moradnezhad — I'm trying to transcode webcam blob data to an RTMP server from the browser by using ffmpeg.wasm.

first, i create a MediaStream.

const stream = await navigator.mediaDevices.getUserMedia({
 video: true,
 });



then, i create a MediaRecorder.


const recorder = new MediaRecorder(stream, {mimeType: "video/webm; codecs:vp9"});
 recorder.ondataavailable = handleDataAvailable;
 recorder.start(0)



when data is available, i call a function called
handleDataAvailable
.

here is the function.

// Fires once per MediaRecorder chunk: keep the blob locally and hand it to
// the transcoder, skipping empty payloads.
const handleDataAvailable = (event: BlobEvent) => {
  console.log("data-available");
  const blob = event.data;
  if (blob.size === 0) {
    return;
  }
  recordedChunksRef.current.push(blob);
  transcode(blob);
};



in above code, i use another function which called
transcode
its goal is to send data to the RTMP server using ffmpeg.wasm
.

here it is.

// Write the recorded blob into ffmpeg.wasm's virtual FS, preview it in the
// <video> element, then attempt to push it to the RTMP endpoint.
// NOTE(review): writeFile returns a Promise in ffmpeg.wasm but is not
// awaited here -- the readFile below may race it; confirm against the
// ffmpeg.wasm API version in use.
const transcode = async (inputVideo: Blob | undefined) => {
 const ffmpeg = ffmpegRef.current;
 // fetchFile converts the Blob into a Uint8Array ffmpeg.wasm can store.
 const fetchFileOutput = await fetchFile(inputVideo)
 ffmpeg?.writeFile('input.webm', fetchFileOutput)

 // Read the bytes back and show them in the preview player as a sanity check.
 const data = await ffmpeg?.readFile('input.webm');
 if (videoRef.current) {
 videoRef.current.src =
 URL.createObjectURL(new Blob([(data as any)?.buffer], {type: 'video/webm'}));
 }

 // execute by node-media-server config 1
 // Remux (no re-encode) to FLV and target the local RTMP server.
 // NOTE(review): ffmpeg.wasm runs sandboxed in the browser and cannot open
 // raw TCP sockets, so an rtmp:// output URL cannot work as-is -- the
 // browser downgrades it to the WebSocket request seen in the network tab.
 await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c', 'copy', '-f', 'flv', "rtmp://localhost:1935/live/ttt"])

 // execute by node-media-server config 2
 // await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c:v', 'libx264', '-preset', 'veryfast', '-tune', 'zerolatency', '-c:a', 'aac', '-ar', '44100', '-f', 'flv', 'rtmp://localhost:1935/live/ttt']);

 // execute by stack-over-flow config 1
 // await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c:v', 'h264', '-c:a', 'aac', '-f', 'flv', "rtmp://localhost:1935/live/ttt"]);

 // execute by stack-over-flow config 2
 // await ffmpeg?.exec(['-i', 'input.webm', '-c:v', 'libx264', '-flags:v', '+global_header', '-c:a', 'aac', '-ac', '2', '-f', 'flv', "rtmp://localhost:1935/live/ttt"]);

 // execute by stack-over-flow config 3
 // await ffmpeg?.exec(['-i', 'input.webm', '-acodec', 'aac', '-ac', '2', '-strict', 'experimental', '-ab', '160k', '-vcodec', 'libx264', '-preset', 'slow', '-profile:v', 'baseline', '-level', '30', '-maxrate', '10000000', '-bufsize', '10000000', '-b', '1000k', '-f', 'flv', 'rtmp://localhost:1935/live/ttt']);

 }



after running app and start streaming, console logs are as below.


ffmpeg >>> ffmpeg version 5.1.3 Copyright (c) 2000-2022 the FFmpeg developers index.tsx:81:20
ffmpeg >>> built with emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) 3.1.40 (5c27e79dd0a9c4e27ef2326841698cdd4f6b5784) index.tsx:81:20
ffmpeg >>> configuration: --target-os=none --arch=x86_32 --enable-cross-compile --disable-asm --disable-stripping --disable-programs --disable-doc --disable-debug --disable-runtime-cpudetect --disable-autodetect --nm=emnm --ar=emar --ranlib=emranlib --cc=emcc --cxx=em++ --objcc=emcc --dep-cc=emcc --extra-cflags='-I/opt/include -O3 -msimd128' --extra-cxxflags='-I/opt/include -O3 -msimd128' --disable-pthreads --disable-w32threads --disable-os2threads --enable-gpl --enable-libx264 --enable-libx265 --enable-libvpx --enable-libmp3lame --enable-libtheora --enable-libvorbis --enable-libopus --enable-zlib --enable-libwebp --enable-libfreetype --enable-libfribidi --enable-libass --enable-libzimg index.tsx:81:20
ffmpeg >>> libavutil 57. 28.100 / 57. 28.100 index.tsx:81:20
ffmpeg >>> libavcodec 59. 37.100 / 59. 37.100 index.tsx:81:20
ffmpeg >>> libavformat 59. 27.100 / 59. 27.100 index.tsx:81:20
ffmpeg >>> libavdevice 59. 7.100 / 59. 7.100 index.tsx:81:20
ffmpeg >>> libavfilter 8. 44.100 / 8. 44.100 index.tsx:81:20
ffmpeg >>> libswscale 6. 7.100 / 6. 7.100 index.tsx:81:20
ffmpeg >>> libswresample 4. 7.100 / 4. 7.100 index.tsx:81:20
ffmpeg >>> libpostproc 56. 6.100 / 56. 6.100 index.tsx:81:20
ffmpeg >>> Input #0, matroska,webm, from 'input.webm': index.tsx:81:20
ffmpeg >>> Metadata: index.tsx:81:20
ffmpeg >>> encoder : QTmuxingAppLibWebM-0.0.1 index.tsx:81:20
ffmpeg >>> Duration: N/A, start: 0.000000, bitrate: N/A index.tsx:81:20
ffmpeg >>> Stream #0:0(eng): Video: vp8, yuv420p(progressive), 640x480, SAR 1:1 DAR 4:3, 15.50 tbr, 1k tbn (default)



the problem is when
ffmpeg.wasm
try to execute the last command.

await ffmpeg?.exec(['-re', '-i', 'input.webm', '-c', 'copy', '-f', 'flv', "rtmp://localhost:1935/live/ttt"])
.

it just calls a GET Request
, I will send further details about this request.

as you can see, I tried lots of argument samples with ffmpeg?.exec
, but none of them works.

the network tab in browser, after
ffmpeg.wasm
execute the command is as below.



it send a
GET request
to ws://localhost:1935/

and nothing happened after that.

for backend, i use node-media-server and here is my output logs when
ffmpeg.wasm
trying to execute the args

11/28/2023 19:33:18 55301 [INFO] [rtmp disconnect] id=JL569YOF
[NodeEvent on doneConnect] id=JL569YOF args=undefined



at last here are my ques




- 

- how can I achieve this?
- is it possible to share webcam to rtmp server ?








-
webrtc to rtmp send video from camera to rtmp link
14 avril 2024, par Leo-Mahendra — I can't send the video from WebRTC, which is converted to buffered data every 10 seconds, to server.js, where it is received via WebSockets and converted to FLV format using ffmpeg.


i am trying to send it to rtmp server named restreamer for start, here i tried to convert the buffer data and send it to rtmp link using ffmpeg commands, where i initially started to suceesfully save the file from webrtc to mp4 format for a duration of 2-3 minute.


after i tried to use webrtc to send video data for every 10 seconds and in server i tried to send it to rtmp but i cant send it, but i can see the connection of rtmp url and server is been taken place but i cant see the video i can see the logs in rtmp server as


2024-04-14 12:35:45 ts=2024-04-14T07:05:45Z level=INFO component="RTMP" msg="no streams available" action="INVALID" address=":1935" client="172.17.0.1:37700" path="/3d30c5a9-2059-4843-8957-da963c7bc19b.stream" who="PUBLISH"
2024-04-14 12:35:45 ts=2024-04-14T07:05:45Z level=INFO component="RTMP" msg="no streams available" action="INVALID" address=":1935" client="172.17.0.1:37716" path="/3d30c5a9-2059-4843-8957-da963c7bc19b.stream" who="PUBLISH"
2024-04-14 12:35:45 ts=2024-04-14T07:05:45Z level=INFO component="RTMP" msg="no streams available" action="INVALID" address=":1935" client="172.17.0.1:37728" path="/3d30c5a9-2059-4843-8957-da963c7bc19b.stream" who="PUBLISH" 



my frontend code


// Capture the webcam with MediaRecorder and stream each 10-second chunk to
// the backend over WebSocket as fixed-size JSON "frames".
// NOTE(review): MediaRecorder emits container-encoded data (WebM), not raw
// yuv420p pixels -- slicing it into requiredFrameSize byte windows and
// labelling them rawvideo is presumably why ffmpeg on the server rejects
// the stream; confirm the intended pixel format against the backend.
const handleSendVideo = async () => {
 console.log("start");
 
 // Bail out early if the WebSocket was never created.
 if (!ws) {
 console.error('WebSocket connection not established.');
 return;
 }
 
 try {
 const videoStream = await navigator.mediaDevices.getUserMedia({ video: true });
 const mediaRecorder = new MediaRecorder(videoStream);
 
 // 460800 = 640 * 480 * 1.5, the byte size of one 640x480 yuv420p frame --
 // TODO confirm this matches the actual camera resolution reported below.
 const requiredFrameSize = 460800;
 const frameDuration = 10 * 1000; // 10 seconds in milliseconds
 
 mediaRecorder.ondataavailable = async (event) => {
 // Drop chunks if the socket is not open (no buffering/retry here).
 if (ws.readyState !== WebSocket.OPEN) {
 console.error('WebSocket connection is not open.');
 return;
 }
 
 if (event.data.size > 0) {
 const arrayBuffer = await event.data.arrayBuffer();
 const uint8Array = new Uint8Array(arrayBuffer);
 
 // Actual capture dimensions, forwarded so the server can build the
 // ffmpeg -s argument.
 const width = videoStream.getVideoTracks()[0].getSettings().width;
 const height = videoStream.getVideoTracks()[0].getSettings().height;
 
 // Split the encoded chunk into fixed-size windows, one per message.
 const numFrames = Math.ceil(uint8Array.length / requiredFrameSize);
 
 for (let i = 0; i < numFrames; i++) {
 const start = i * requiredFrameSize;
 const end = Math.min((i + 1) * requiredFrameSize, uint8Array.length);
 let frameData = uint8Array.subarray(start, end);
 
 // Pad or trim the frameData to match the required size
 if (frameData.length < requiredFrameSize) {
 // Pad with zeros to reach the required size
 const paddedData = new Uint8Array(requiredFrameSize);
 paddedData.set(frameData, 0);
 frameData = paddedData;
 } else if (frameData.length > requiredFrameSize) {
 // Trim to match the required size
 frameData = frameData.subarray(0, requiredFrameSize);
 }
 
 // JSON envelope consumed by the server's 'SendRtmp' branch.
 const dataToSend = {
 buffer: Array.from(frameData), // Convert Uint8Array to array of numbers
 width: width,
 height: height,
 pixelFormat: 'yuv420p',
 mode: 'SendRtmp'
 };
 
 console.log("Sending frame:", i);
 ws.send(JSON.stringify(dataToSend));
 }
 }
 };
 
 // Start recording and send data every 10 seconds
 mediaRecorder.start(frameDuration);
 
 console.log("MediaRecorder started.");
 } catch (error) {
 console.error('Error accessing media devices or starting recorder:', error);
 }
 };



and my backend


// Accept WebSocket clients; each JSON message in 'SendRtmp' mode is decoded
// and its frame buffer forwarded to ffmpeg via sendRtmpVideo.
wss.on('connection', (ws) => {
  console.log('WebSocket connection established.');

  ws.on('message', async (raw) => {
    try {
      const message = JSON.parse(raw);
      const isRtmpFrame =
        message.mode === 'SendRtmp' && Array.isArray(message.buffer);

      if (!isRtmpFrame) {
        console.log('Received unknown or invalid mode or buffer data');
        return;
      }

      const { buffer, pixelFormat, width, height } = message;
      // Rebuild the byte buffer from the JSON number array before piping it on.
      await sendRtmpVideo(Buffer.from(buffer), pixelFormat, width, height);
    } catch (error) {
      console.error('Error parsing WebSocket message:', error);
    }
  });

  ws.on('close', () => {
    console.log('WebSocket connection closed.');
  });
  });
 // Push one raw-video frame buffer to the RTMP endpoint through a spawned
 // ffmpeg process.
 // NOTE(review): a fresh ffmpeg process is spawned per call and its stdin is
 // closed after a single buffer, so every frame becomes its own short-lived
 // RTMP publish -- this is presumably why the server logs "no streams
 // available"; a single long-lived ffmpeg process fed continuously would be
 // the usual shape. Confirm against the intended streaming design.
 const sendRtmpVideo = async (frameBuffer, pixelFormat, width, height) => {
 console.log("ffmpeg data",frameBuffer)
 try {
 // e.g. "640x480" for ffmpeg's -s (frame size) option.
 const ratio = `${width}x${height}`;
 const ffmpegCommand = [
 '-re',
 '-f', 'rawvideo',
 '-pix_fmt', pixelFormat,
 '-s', ratio,
 '-i', 'pipe:0',
 '-c:v', 'libx264',
 '-preset', 'fast', // Specify the preset for libx264
 '-b:v', '3000k', // Specify the video bitrate
 '-loglevel', 'debug',
 '-f', 'flv',
 // '-flvflags', 'no_duration_filesize', 
 RTMPLINK
 ];


 const ffmpeg = spawn('ffmpeg', ffmpegCommand);

 // Report process outcome; non-zero exit codes are logged, not rethrown.
 ffmpeg.on('exit', (code, signal) => {
 if (code === 0) {
 console.log('FFmpeg process exited successfully.');
 } else {
 console.error(`FFmpeg process exited with code ${code} and signal ${signal}`);
 }
 });

 // Fires if the ffmpeg binary cannot be spawned at all.
 ffmpeg.on('error', (error) => {
 console.error('FFmpeg spawn error:', error);
 });

 // Mirror ffmpeg's diagnostic output (it writes logs to stderr).
 ffmpeg.stderr.on('data', (data) => {
 console.error(`FFmpeg stderr: ${data}`);
 });

 // Write the single frame, then close stdin, which makes ffmpeg finish
 // and exit after this one buffer.
 ffmpeg.stdin.write(frameBuffer, (err) => {
 if (err) {
 console.error('Error writing to FFmpeg stdin:', err);
 } else {
 console.log('Data written to FFmpeg stdin successfully.');
 }
 ffmpeg.stdin.end(); // Close stdin after writing the buffer
 });
 } catch (error) {
 console.error('Error in sendRtmpVideo:', error);
 }
 };