Advanced search

Media (1)

Keyword: - Tags -/lev manovitch

Other articles (58)

  • Multilang: improving the interface for multilingual blocks

    18 February 2011

    Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialised.
    Once it is activated, MediaSPIP init automatically applies a preconfiguration so that the new feature works right away. No separate configuration step is therefore required.

  • Publishing on MediaSPIP

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, if your MediaSPIP installation is at version 0.2 or later. If necessary, contact the administrator of your MediaSPIP site to find out.

  • Supported formats

    28 January 2010

    The following commands list the formats and codecs supported by the local ffmpeg installation:
    ffmpeg -codecs
    ffmpeg -formats
    Accepted input video formats
    This list is not exhaustive; it highlights the main formats in use: h264: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10; m4v: raw MPEG-4 video format; flv: Flash Video (FLV) / Sorenson Spark / Sorenson H.263; Theora; wmv:
    Possible output video formats
    To begin with, we (...)

On other sites (6703)

  • ffmpeg failed to load audio file

    14 April 2024, by Vaishnav Ghenge
    Failed to load audio: ffmpeg version 5.1.4-0+deb12u1 Copyright (c) 2000-2023 the FFmpeg developers
  built with gcc 12 (Debian 12.2.0-14)
  configuration: --prefix=/usr --extra-version=0+deb12u1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libdav1d --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libglslang --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librist --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libsvtav1 --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzimg --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --disable-sndio --enable-libjxl --enable-pocketsphinx --enable-librsvg --enable-libmfx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-libplacebo --enable-librav1e --enable-shared
  libavutil      57. 28.100 / 57. 28.100
  libavcodec     59. 37.100 / 59. 37.100
  libavformat    59. 27.100 / 59. 27.100
  libavdevice    59.  7.100 / 59.  7.100
  libavfilter     8. 44.100 /  8. 44.100
  libswscale      6.  7.100 /  6.  7.100
  libswresample   4.  7.100 /  4.  7.100
  libpostproc    56.  6.100 / 56.  6.100
/tmp/tmpjlchcpdm.wav: Invalid data found when processing input

    Backend:

# Assumed imports (not shown in the original post); app, celery_app and
# transcribe_with_whisper are assumed to be defined elsewhere.
import os
import tempfile
import time
import uuid

from flask import request, jsonify
from werkzeug.utils import secure_filename


@app.route("/transcribe", methods=["POST"])
def transcribe():
    # Check if audio file is present in the request
    if 'audio_file' not in request.files:
        return jsonify({"error": "No file part"}), 400
    
    audio_file = request.files.get('audio_file')

    # Check if audio_file is sent in files
    if not audio_file:
        return jsonify({"error": "`audio_file` is missing in request.files"}), 400

    # Check if the file is present
    if audio_file.filename == '':
        return jsonify({"error": "No selected file"}), 400

    # Save the file with a unique name
    filename = secure_filename(audio_file.filename)
    unique_filename = os.path.join("uploads", str(uuid.uuid4()) + '_' + filename)
    # audio_file.save(unique_filename)
    
    # Read the contents of the audio file
    contents = audio_file.read()

    max_file_size = 500 * 1024 * 1024
    if len(contents) > max_file_size:
        return jsonify({"error": "File is too large"}), 400

    # Check if the file extension suggests it's a WAV file
    if not filename.lower().endswith('.wav'):
        # Delete the file if it's not a WAV file
        os.remove(unique_filename)
        return jsonify({"error": "Only WAV files are supported"}), 400

    print(f"\033[92m{filename}\033[0m")

    # Call Celery task asynchronously
    result = transcribe_audio.delay(contents)

    return jsonify({
        "task_id": result.id,
        "status": "pending"
    })


@celery_app.task
def transcribe_audio(contents):
    # Transcribe the audio
    try:
        # Create a temporary file to save the audio data
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
            temp_path = temp_audio.name
            temp_audio.write(contents)

            print(f"\033[92mFile temporary path: {temp_path}\033[0m")
            transcribe_start_time = time.time()

            # Transcribe the audio
            transcription = transcribe_with_whisper(temp_path)
            
            transcribe_end_time = time.time()
            print(f"\033[92mTranscripted text: {transcription}\033[0m")

            return transcription, transcribe_end_time - transcribe_start_time

    except Exception as e:
        print(f"\033[92mError: {e}\033[0m")
        return str(e)

    Frontend:

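    // Assumed state and refs (not shown in the original snippet):
    // const [chunks, setChunks] = useState([]);
    // const [stream, setStream] = useState(null);
    // const [recorder, setRecorder] = useState(null);
    // const pendingTaskIdsRef = useRef([]);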
        useEffect(() => {
        const init = () => {
            navigator.mediaDevices.getUserMedia({audio: true})
                .then((audioStream) => {
                    const recorder = new MediaRecorder(audioStream);

                    recorder.ondataavailable = e => {
                        if (e.data.size > 0) {
                            setChunks(prevChunks => [...prevChunks, e.data]);
                        }
                    };

                    recorder.onerror = (e) => {
                        console.log("error: ", e);
                    }

                    recorder.onstart = () => {
                        console.log("started");
                    }

                    recorder.start();

                    setStream(audioStream);
                    setRecorder(recorder);
                });
        }

        init();

        return () => {
            if (recorder && recorder.state === 'recording') {
                recorder.stop();
            }

            if (stream) {
                stream.getTracks().forEach(track => track.stop());
            }
        }
    }, []);

    useEffect(() => {
        // Send chunks of audio data to the backend at regular intervals
        const intervalId = setInterval(() => {
            if (recorder && recorder.state === 'recording') {
                recorder.requestData(); // Trigger data available event
            }
        }, 8000); // Adjust the interval as needed


        return () => {
            if (intervalId) {
                console.log("Interval cleared");
                clearInterval(intervalId);
            }
        };
    }, [recorder]);

    useEffect(() => {
        const processAudio = async () => {
            if (chunks.length > 0) {
                // Send the latest chunk to the server for transcription
                const latestChunk = chunks[chunks.length - 1];

                const audioBlob = new Blob([latestChunk]);
                convertBlobToAudioFile(audioBlob);
            }
        };

        void processAudio();
    }, [chunks]);

    const convertBlobToAudioFile = useCallback((blob: Blob) => {
        // Convert Blob to audio file (e.g., WAV)
        // This conversion may require using a third-party library or service
        // For example, you can use the MediaRecorder API to record audio in WAV format directly
        // Alternatively, you can use a library like recorderjs to perform the conversion
        // Here's a simplified example using recorderjs:

        const reader = new FileReader();
        reader.onload = () => {
            const audioBuffer = reader.result; // ArrayBuffer containing audio data

            // Send audioBuffer to Flask server or perform further processing
            sendAudioToFlask(audioBuffer as ArrayBuffer);
        };

        reader.readAsArrayBuffer(blob);
    }, []);

    const sendAudioToFlask = useCallback((audioBuffer: ArrayBuffer) => {
        const formData = new FormData();
        formData.append('audio_file', new Blob([audioBuffer]), `speech_audio.wav`);

        console.log(formData.get("audio_file"));

        fetch('http://34.87.75.138:8000/transcribe', {
            method: 'POST',
            body: formData
        })
            .then(response => response.json())
            .then((data: { task_id: string, status: string }) => {
                pendingTaskIdsRef.current.push(data.task_id);
            })
            .catch(error => {
                console.error('Error sending audio to Flask server:', error);
            });
    }, []);


    


    I was trying to pass audio from the frontend to the Whisper model, which runs in a Flask app.
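    A likely reason ffmpeg rejects these uploads (my reading, not something stated in the post): with recorder.requestData(), every Blob after the first is a continuation of one long WebM/Opus stream, so an isolated chunk carries no container header of its own, and saving it with a .wav suffix does not make it a WAV file. A minimal sketch of uploading the accumulated recording instead (the helper name sendFullRecording is hypothetical):

// Sketch only (not from the original post): upload everything recorded so far as
// one Blob, so the container header from the first chunk is preserved.
const sendFullRecording = (chunks, recorder) => {
    if (!chunks.length) return;

    // Keep the recorder's real MIME type (typically audio/webm, not WAV).
    const fullBlob = new Blob(chunks, { type: recorder.mimeType });

    const formData = new FormData();
    formData.append('audio_file', fullBlob, 'speech_audio.webm');

    fetch('http://34.87.75.138:8000/transcribe', { method: 'POST', body: formData })
        .then(response => response.json())
        .then((data) => console.log('queued task', data.task_id))
        .catch(error => console.error('Error sending audio to Flask server:', error));
};

    The Flask side would then also need to accept the container the browser actually produces; the .wav-only filename check and the .wav temp-file suffix both assume a format MediaRecorder is not emitting here.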

    


  • FFMPEG throws error while converting WEBM/MP4 audio files from MediaRecorder to MP3

    27 September 2022, by Sahil Malik

    I use MediaRecorder to record audio in the browser and then upload it to my server (an ARM-based Linux machine, an AWS Lambda function, if it matters). Depending on the browser, MediaRecorder gives me either an MP4 file (Safari) or a WEBM file (every other browser). The audio is converted to a Base64 string and posted to my server, where it is processed with FFmpeg.

    


    MediaRecorder implementation

    


    const audioStream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
const MediaRecorder = window['MediaRecorder'];
const mimeType = MediaRecorder.isTypeSupported('audio/webm') ? 'audio/webm' : 'audio/mp4';
const recordingFileExtension = MediaRecorder.isTypeSupported('audio/webm') ? 'webm' : 'mp4';
mediaRecorder = new MediaRecorder(audioStream, { mimeType });
mediaRecorder.ondataavailable = convertBlobAndUploadChunk;
mediaRecorder.start(30 * 1000); // timeslice needs to be in ms
// When user stops recording
mediaRecorder.stop();
audioStream.getTracks().forEach( t => { t.stop(); });
// To convert the audio blob to string
function convertBlobToBase64(blob) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.readAsDataURL(blob);
        reader.onload = () => {
            const tmpStr = reader.result.toString();
            resolve(tmpStr.substring(tmpStr.indexOf('base64,') + 7));
        };
        reader.onerror = error => reject(error);
    });
}

let blobCount = 0;
async function convertBlobAndUploadChunk(blobEvent) {
    if (!blobEvent.data || blobEvent.data.size === 0) return;

    blobCount++;

    const recordData = JSON.stringify({
        M: {
            blobCount,
            //some other meta data
        },
        D: await convertBlobToBase64(audioBlob),
    });

    await angularHttpClient.post(apiUrl, recordData, new HttpHeaders({
        'Content-Type': 'application/json',
        'x-api-key': apiKey,
    })).toPromise();
}


    


    On my server, I convert the WEBM/MP4 file to an MP3 file for better cross-browser compatibility and to enable scrubbing.

    


    Backend Lambda Implementation

    


    const FFMpegCommand = require('fluent-ffmpeg'); // v2.1.2
new FFMpegCommand()
        .input(originalFile)
        .on('end', (error, stdOut, stdError) => {
            if (error) {
                console.error(error);
            }
        })
        .save(convertedFile);


    


    This works perfectly fine 99.9% of the time, but FFmpeg throws one of the following errors the remaining 0.1% of the time:

    


    Error 1: Invalid data found when processing input

    


    error reading header

    


    ffmpeg version 4.4-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2021 the 
FFmpeg developers
built with gcc 8 (Debian 8.3.0-6)
configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-libgme --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libdav1d --enable-libxvid --enable-libzvbi --enable-libzimg
libavutil 56. 70.100 / 56. 70.100
libavcodec 58.134.100 / 58.134.100
libavformat 58. 76.100 / 58. 76.100
libavdevice 58. 13.100 / 58. 13.100
libavfilter 7.110.100 / 7.110.100
libswscale 5. 9.100 / 5. 9.100
libswresample 3. 9.100 / 3. 9.100
libpostproc 55. 9.100 / 55. 9.100
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x7232f40] could not find corresponding trex (id 1)
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x7232f40] could not find corresponding track id 0
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x7232f40] trun track id unknown, no tfhd was found
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x7232f40] error reading header
/tmp/long-filename-of-140chars-to-keep-files-unique.mp4: Invalid data found when processing input


    


    EBML Header parsing failed

    


    ffmpeg version 4.4-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2021 the FFmpeg developers
built with gcc 8 (Debian 8.3.0-6)
configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-libgme --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libdav1d --enable-libxvid --enable-libzvbi --enable-libzimg
libavutil 56. 70.100 / 56. 70.100
libavcodec 58.134.100 / 58.134.100
libavformat 58. 76.100 / 58. 76.100
libavdevice 58. 13.100 / 58. 13.100
libavfilter 7.110.100 / 7.110.100
libswscale 5. 9.100 / 5. 9.100
libswresample 3. 9.100 / 3. 9.100
libpostproc 55. 9.100 / 55. 9.100
[matroska,webm @ 0x7595f40] Format matroska,webm detected only with low score of 1, misdetection possible!
[matroska,webm @ 0x7595f40] EBML header parsing failed
/tmp/long-filename-of-140chars-to-keep-files-unique.webm: Invalid data found when processing input


    


    No specific details

    


    ffmpeg version 4.4-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2021 the FFmpeg developers
built with gcc 8 (Debian 8.3.0-6)
configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-libgme --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libdav1d --enable-libxvid --enable-libzvbi --enable-libzimg
libavutil 56. 70.100 / 56. 70.100
libavcodec 58.134.100 / 58.134.100
libavformat 58. 76.100 / 58. 76.100
libavdevice 58. 13.100 / 58. 13.100
libavfilter 7.110.100 / 7.110.100
libswscale 5. 9.100 / 5. 9.100
libswresample 3. 9.100 / 3. 9.100
libpostproc 55. 9.100 / 55. 9.100
/tmp/long-filename-of-140chars-to-keep-files-unique.webm: Invalid data found when processing input


    


    Error 2: Output file #0 does not contain any stream

    


    ffmpeg version 4.4-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2021 the FFmpeg developers
built with gcc 8 (Debian 8.3.0-6)
configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-libgme --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libdav1d --enable-libxvid --enable-libzvbi --enable-libzimg
libavutil 56. 70.100 / 56. 70.100
libavcodec 58.134.100 / 58.134.100
libavformat 58. 76.100 / 58. 76.100
libavdevice 58. 13.100 / 58. 13.100
libavfilter 7.110.100 / 7.110.100
libswscale 5. 9.100 / 5. 9.100
libswresample 3. 9.100 / 3. 9.100
libpostproc 55. 9.100 / 55. 9.100
[mpegts @ 0x5d19f40] Format mpegts detected only with low score of 2, misdetection possible!
[mpegts @ 0x5d19f40] Could not detect TS packet size, defaulting to non-FEC/DVHS
Input #0, mpegts, from '/tmp/long-filename-of-140chars-to-keep-files-unique.webm':
Duration: N/A, bitrate: N/A
Output #0, mp3, to '/tmp/long-filename-of-140chars-to-keep-files-unique.mp3':
Output file #0 does not contain any stream
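
    A way to make these intermittent failures easier to diagnose (a sketch under assumptions, not the original Lambda code; it presumes fluent-ffmpeg v2.x and that the caller knows whether the upload was recorded as WebM or MP4) is to listen on fluent-ffmpeg's error event, which carries ffmpeg's stderr, and to declare the input container explicitly instead of relying on probing:

// Hedged sketch: surface ffmpeg's stderr and skip container auto-detection.
const FFMpegCommand = require('fluent-ffmpeg'); // v2.1.2

function convertToMp3(originalFile, convertedFile, inputFormat /* 'webm' or 'mp4' */) {
    return new Promise((resolve, reject) => {
        new FFMpegCommand()
            .input(originalFile)
            .inputFormat(inputFormat)
            .on('error', (err, stdout, stderr) => {
                // fluent-ffmpeg reports conversion failures here, with full stderr.
                console.error('ffmpeg failed:', err.message, stderr);
                reject(err);
            })
            .on('end', () => resolve(convertedFile))
            .save(convertedFile);
    });
}

    If the failing uploads turn out to be later chunks produced by mediaRecorder.start(30 * 1000), those blobs are continuations of the first one and carry no EBML/moov header of their own, which would be consistent with the "error reading header" and "EBML header parsing failed" messages above.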


    


    Thanks for reading, any help/suggestion is highly appreciated.

    


  • Stream sent via FFMPEG (NodeJS) to RTMP (YouTube) not being received

    10 December 2024, by Qumber

    I am writing a very basic Chrome extension that captures a video stream and sends it to a Node.js server, which in turn sends it to YouTube's live server.

    


    Here is my implementation of the backend, which receives data via WebRTC and sends it to YouTube using FFmpeg:

    


    const express = require('express');
const cors = require('cors');
const { RTCPeerConnection, RTCSessionDescription } = require('@roamhq/wrtc');
const { spawn } = require('child_process');

const app = express();
app.use(express.json());
app.use(cors());

app.post('/webrtc', async (req, res) => {
  const peerConnection = new RTCPeerConnection();

  // Start ffmpeg process for streaming
  const ffmpeg = spawn('ffmpeg', [
    '-f', 'flv',
    '-i', 'pipe:0',
    '-c:v', 'libx264',
    '-preset', 'veryfast',
    '-maxrate', '3000k',
    '-bufsize', '6000k',
    '-pix_fmt', 'yuv420p',
    '-g', '50',
    '-f', 'flv',
    'rtmp://a.rtmp.youtube.com/live2/MY_KEY'
  ]);

  ffmpeg.on('error', (err) => {
    console.error('FFmpeg error:', err);
  });

  ffmpeg.stderr.on('data', (data) => {
    console.error('FFmpeg stderr:', data.toString());
  });

  ffmpeg.stdout.on('data', (data) => {
    console.log('FFmpeg stdout:', data.toString());
  });

  // Handle incoming tracks
  peerConnection.ontrack = (event) => {
    console.log('Track received:', event.track.kind);
    const track = event.track;

    // Stream the incoming track to FFmpeg
    track.onunmute = () => {
      console.log('Track unmuted:', track.kind);
      const reader = track.createReadStream();
      reader.on('data', (chunk) => {
        console.log('Forwarding chunk to FFmpeg:', chunk.length);
        ffmpeg.stdin.write(chunk);
      });
      reader.on('end', () => {
        console.log('Stream ended');
        ffmpeg.stdin.end();
      });
    };

    track.onmute = () => {
      console.log('Track muted:', track.kind);
    };
  };

  // Set the remote description (offer) received from the client
  await peerConnection.setRemoteDescription(new RTCSessionDescription(req.body.sdp));

  // Create an answer and send it back to the client
  const answer = await peerConnection.createAnswer();
  await peerConnection.setLocalDescription(answer);

  res.json({ sdp: peerConnection.localDescription });
});

app.listen(3000, () => {
  console.log('WebRTC to RTMP server running on port 3000');
});



    


    This is the output I get, but nothing gets sent to YouTube:

    


    

    FFmpeg stderr: ffmpeg version 7.0.2 Copyright (c) 2000-2024 the FFmpeg developers
  built with Apple clang version 15.0.0 (clang-1500.3.9.4)

FFmpeg stderr:   configuration: --prefix=/opt/homebrew/Cellar/ffmpeg/7.0.2_1 --enable-shared --enable-pthreads --enable-version3 --cc=clang --host-cflags= --host-ldflags='-Wl,-ld_classic' --enable-ffplay --enable-gnutls --enable-gpl --enable-libaom --enable-libaribb24 --enable-libbluray --enable-libdav1d --enable-libharfbuzz --enable-libjxl --enable-libmp3lame --enable-libopus --enable-librav1e --enable-librist --enable-librubberband --enable-libsnappy --enable-libsrt --enable-libssh --enable-libsvtav1 --enable-libtesseract --enable-libtheora --enable-libvidstab --enable-libvmaf --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libxvid --enable-lzma --enable-libfontconfig --enable-libfreetype --enable-frei0r --enable-libass --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libspeex --enable-libsoxr --enable-libzmq --enable-libzimg --disable-libjack --disable-indev=jack --enable-videotoolbox --enable-audiotoolbox --enable-neon

FFmpeg stderr:   libavutil      59.  8.100 / 59.  8.100
  libavcodec     61.  3.100 / 61.  3.100
  libavformat    61.  1.100 / 61.  1.100
  libavdevice    61.  1.100 / 61.  1.100

FFmpeg stderr:   libavfilter    10.  1.100 / 10.  1.100
  libswscale      8.  1.100 /  8.  1.100
  libswresample   5.  1.100 /  5.  1.100
  libpostproc    58.  1.100 / 58.  1.100


    


    


    I do not understand what I am doing wrong. Any help would be appreciated.
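
    One way to narrow this down (a sketch, not from the original post) is to take WebRTC out of the picture and confirm the RTMP leg on its own, by spawning ffmpeg with a synthetic source against the same ingest URL; note that YouTube's RTMP ingest generally expects an audio track as well as video:

// Hedged sketch: push a test pattern plus silent audio to the same RTMP URL.
// If this shows up in YouTube Studio, the ingest side works and the problem is
// in how the WebRTC track data is being written to ffmpeg's stdin as FLV.
const { spawn } = require('child_process');

const test = spawn('ffmpeg', [
  '-re',
  '-f', 'lavfi', '-i', 'testsrc=size=1280x720:rate=30',                    // synthetic video
  '-f', 'lavfi', '-i', 'anullsrc=channel_layout=stereo:sample_rate=44100', // silent audio
  '-c:v', 'libx264', '-preset', 'veryfast', '-pix_fmt', 'yuv420p', '-g', '50',
  '-c:a', 'aac', '-b:a', '128k',
  '-f', 'flv',
  'rtmp://a.rtmp.youtube.com/live2/MY_KEY'
]);

test.stderr.on('data', (d) => console.error('FFmpeg stderr:', d.toString()));
test.on('close', (code) => console.log('ffmpeg exited with code', code));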

    



    


    Optionally, here's the frontend code from the extension, which (to me) appears to be recording and sending the capture:

    


    popup.js & popup.html

    


    

    

    document.addEventListener('DOMContentLoaded', () => {
  document.getElementById('openCapturePage').addEventListener('click', () => {
    chrome.tabs.create({
      url: chrome.runtime.getURL('capture.html')
    });
  });
});

    


    popup.html (markup garbled in extraction): it loads popup.js and is titled "StreamSavvy".

    capture.js & capture.html

let peerConnection;

async function startStreaming() {
  try {
    const stream = await navigator.mediaDevices.getDisplayMedia({
      video: {
        cursor: "always"
      },
      audio: false
    });

    peerConnection = new RTCPeerConnection({
      iceServers: [{
        urls: 'stun:stun.l.google.com:19302'
      }]
    });

    stream.getTracks().forEach(track => peerConnection.addTrack(track, stream));

    const offer = await peerConnection.createOffer();
    await peerConnection.setLocalDescription(offer);

    const response = await fetch('http://localhost:3000/webrtc', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        sdp: peerConnection.localDescription
      })
    });

    const {
      sdp
    } = await response.json();
    await peerConnection.setRemoteDescription(new RTCSessionDescription(sdp));

    console.log("Streaming to server via WebRTC...");
  } catch (error) {
    console.error("Error starting streaming:", error.name, error.message);
  }
}

async function stopStreaming() {
  if (peerConnection) {
    // Stop all media tracks
    peerConnection.getSenders().forEach(sender => {
      if (sender.track) {
        sender.track.stop();
      }
    });

    // Close the peer connection
    peerConnection.close();
    peerConnection = null;
    console.log("Streaming stopped");
  }
}

document.addEventListener('DOMContentLoaded', () => {
  document.getElementById('startCapture').addEventListener('click', startStreaming);
  document.getElementById('stopCapture').addEventListener('click', stopStreaming);
});

    capture.html (markup garbled in extraction): it loads capture.js, is titled "StreamSavvy Capture", and holds the startCapture / stopCapture buttons that capture.js wires up.

    background.js (service worker)

chrome.runtime.onInstalled.addListener(() => {
  console.log("StreamSavvy Extension Installed");
});

chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
  if (message.type === 'startStreaming') {
    chrome.tabs.create({
      url: chrome.runtime.getURL('capture.html')
    });
    sendResponse({
      status: 'streaming'
    });
  } else if (message.type === 'stopStreaming') {
    chrome.tabs.query({
      url: chrome.runtime.getURL('capture.html')
    }, (tabs) => {
      if (tabs.length > 0) {
        chrome.tabs.sendMessage(tabs[0].id, {
          type: 'stopStreaming'
        });
        sendResponse({
          status: 'stopped'
        });
      }
    });
  }
  return true; // Keep the message channel open for sendResponse
});