
Recherche avancée
Médias (1)
-
Rennes Emotion Map 2010-11
19 octobre 2011, par
Mis à jour : Juillet 2013
Langue : français
Type : Texte
Autres articles (81)
-
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, par — Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.
-
Ecrire une actualité
21 juin 2013, par — Présentez les changements dans votre MédiaSPIP ou les actualités de vos projets sur votre MédiaSPIP grâce à la rubrique actualités.
Dans le thème par défaut spipeo de MédiaSPIP, les actualités sont affichées en bas de la page principale sous les éditoriaux.
Vous pouvez personnaliser le formulaire de création d’une actualité.
Formulaire de création d’une actualité Dans le cas d’un document de type actualité, les champs proposés par défaut sont : Date de publication ( personnaliser la date de publication ) (...) -
Publier sur MédiaSpip
13 juin 2013 — Puis-je poster des contenus à partir d’une tablette Ipad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir
Sur d’autres sites (6036)
-
Accessibility Testing : Why It Matters and How to Get Started
7 mai 2024, par Erin -
What Is Ethical SEO & Why Does It Matter ?
7 mai 2024, par Erin -
ffmpeg failed to load audio file
14 avril 2024, par Vaishnav Ghenge — Failed to load audio: ffmpeg version 5.1.4-0+deb12u1 Copyright (c) 2000-2023 the FFmpeg developers
 built with gcc 12 (Debian 12.2.0-14)
 configuration: --prefix=/usr --extra-version=0+deb12u1 --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libdav1d --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libglslang --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librist --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libsvtav1 --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzimg --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --disable-sndio --enable-libjxl --enable-pocketsphinx --enable-librsvg --enable-libmfx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-libplacebo --enable-librav1e --enable-shared
 libavutil 57. 28.100 / 57. 28.100
 libavcodec 59. 37.100 / 59. 37.100
 libavformat 59. 27.100 / 59. 27.100
 libavdevice 59. 7.100 / 59. 7.100
 libavfilter 8. 44.100 / 8. 44.100
 libswscale 6. 7.100 / 6. 7.100
 libswresample 4. 7.100 / 4. 7.100
 libpostproc 56. 6.100 / 56. 6.100
/tmp/tmpjlchcpdm.wav: Invalid data found when processing input



backend :



@app.route("/transcribe", methods=["POST"])
def transcribe():
    """Accept an uploaded WAV file and queue it for asynchronous transcription.

    Expects a multipart/form-data POST with an ``audio_file`` part.  Validates
    presence, filename, extension, and size, then hands the raw bytes to the
    ``transcribe_audio`` Celery task.

    Returns:
        JSON ``{"task_id": ..., "status": "pending"}`` on success, or a JSON
        ``{"error": ...}`` payload with HTTP 400 on any validation failure.
    """
    # Reject requests that don't carry the expected multipart field at all.
    if 'audio_file' not in request.files:
        return jsonify({"error": "No file part"}), 400

    audio_file = request.files.get('audio_file')
    if not audio_file:
        return jsonify({"error": "`audio_file` is missing in request.files"}), 400

    # An empty filename means the form was submitted without selecting a file.
    if audio_file.filename == '':
        return jsonify({"error": "No selected file"}), 400

    filename = secure_filename(audio_file.filename)

    # Reject non-WAV uploads *before* reading the payload into memory.
    # Bug fix: the original called os.remove() on a path that was never
    # written to disk (the save call was commented out), which would have
    # raised FileNotFoundError instead of returning this 400 response.
    if not filename.lower().endswith('.wav'):
        return jsonify({"error": "Only WAV files are supported"}), 400

    # Read the upload fully so we can size-check it and pass bytes to Celery.
    contents = audio_file.read()

    max_file_size = 500 * 1024 * 1024  # 500 MiB upload cap
    if len(contents) > max_file_size:
        return jsonify({"error": "File is too large"}), 400

    # Queue the transcription asynchronously; the client polls by task id.
    result = transcribe_audio.delay(contents)

    return jsonify({
        "task_id": result.id,
        "status": "pending"
    })


@celery_app.task
def transcribe_audio(contents):
    """Celery task: transcribe raw WAV bytes with Whisper.

    Writes *contents* to a temporary ``.wav`` file, runs
    ``transcribe_with_whisper()`` on that path, and returns a
    ``(transcription, elapsed_seconds)`` tuple.  On failure the exception
    message is returned as a plain string (kept for backward compatibility
    with existing callers, even though the success/error shapes differ).

    Args:
        contents: Raw audio file bytes, expected to be a valid WAV payload.
    """
    temp_path = None
    try:
        # Write the bytes out and close the file before transcribing so the
        # data is fully flushed when Whisper opens it.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
            temp_path = temp_audio.name
            temp_audio.write(contents)

        print(f"\033[92mFile temporary path: {temp_path}\033[0m")
        transcribe_start_time = time.time()

        # Transcribe the audio
        transcription = transcribe_with_whisper(temp_path)

        transcribe_end_time = time.time()
        print(f"\033[92mTranscripted text: {transcription}\033[0m")

        return transcription, transcribe_end_time - transcribe_start_time

    except Exception as e:
        print(f"\033[92mError: {e}\033[0m")
        return str(e)
    finally:
        # Bug fix: delete=False meant the temp file leaked on every task run.
        if temp_path and os.path.exists(temp_path):
            os.remove(temp_path)



frontend :


useEffect(() => {
    // Keep local references for the cleanup closure.
    // Bug fix: the original cleanup read `recorder`/`stream` from React
    // state, but with an empty deps array those bindings are frozen at
    // their initial (null) values — so the mic and recorder were never
    // actually stopped on unmount (stale-closure bug).
    let activeStream: MediaStream | null = null;
    let activeRecorder: MediaRecorder | null = null;

    const init = () => {
      navigator.mediaDevices.getUserMedia({audio: true})
        .then((audioStream) => {
          const recorder = new MediaRecorder(audioStream);

          // Accumulate non-empty chunks as the recorder flushes them.
          recorder.ondataavailable = e => {
            if (e.data.size > 0) {
              setChunks(prevChunks => [...prevChunks, e.data]);
            }
          };

          recorder.onerror = (e) => {
            console.log("error: ", e);
          }

          recorder.onstart = () => {
            console.log("started");
          }

          recorder.start();

          activeStream = audioStream;
          activeRecorder = recorder;
          setStream(audioStream);
          setRecorder(recorder);
        });
    }

    init();

    return () => {
      // Stop recording and release the microphone on unmount.
      if (activeRecorder && activeRecorder.state === 'recording') {
        activeRecorder.stop();
      }

      if (activeStream) {
        activeStream.getTracks().forEach(track => track.stop());
      }
    }
  }, []);

 useEffect(() => {
 // Send chunks of audio data to the backend at regular intervals
 const intervalId = setInterval(() => {
 if (recorder && recorder.state === 'recording') {
 recorder.requestData(); // Trigger data available event
 }
 }, 8000); // Adjust the interval as needed


 return () => {
 if (intervalId) {
 console.log("Interval cleared");
 clearInterval(intervalId);
 }
 };
 }, [recorder]);

 useEffect(() => {
 const processAudio = async () => {
 if (chunks.length > 0) {
 // Send the latest chunk to the server for transcription
 const latestChunk = chunks[chunks.length - 1];

 const audioBlob = new Blob([latestChunk]);
 convertBlobToAudioFile(audioBlob);
 }
 };

 void processAudio();
 }, [chunks]);

const convertBlobToAudioFile = useCallback((blob: Blob) => {
    // NOTE(review): despite the name, no format conversion happens here —
    // the blob's bytes (MediaRecorder output, typically webm/opus, not WAV)
    // are read into an ArrayBuffer and forwarded as-is.  The backend only
    // accepts files named *.wav and ffmpeg reports "Invalid data found",
    // which is consistent with non-WAV bytes being sent — confirm, and add
    // a real WAV encode step (e.g. via the Web Audio API) if needed.
    const reader = new FileReader();
    reader.onload = () => {
      const audioBuffer = reader.result; // ArrayBuffer containing audio data

      // Send audioBuffer to Flask server or perform further processing
      sendAudioToFlask(audioBuffer as ArrayBuffer);
    };

    reader.readAsArrayBuffer(blob);
  }, []);

 const sendAudioToFlask = useCallback((audioBuffer: ArrayBuffer) => {
 const formData = new FormData();
 formData.append('audio_file', new Blob([audioBuffer]), `speech_audio.wav`);

 console.log(formData.get("audio_file"));

 fetch('http://34.87.75.138:8000/transcribe', {
 method: 'POST',
 body: formData
 })
 .then(response => response.json())
 .then((data: { task_id: string, status: string }) => {
 pendingTaskIdsRef.current.push(data.task_id);
 })
 .catch(error => {
 console.error('Error sending audio to Flask server:', error);
 });
 }, []);



I was trying to pass the audio from the frontend to the Whisper model, which runs inside the Flask app.