
Recherche avancée
Autres articles (18)
-
Encoding and processing into web-friendly formats
13 avril 2011, par — MediaSPIP automatically converts uploaded files to internet-compatible formats.
Video files are encoded in MP4, Ogv and WebM (supported by HTML5) and MP4 (supported by Flash).
Audio files are encoded in MP3 and Ogg (supported by HTML5) and MP3 (supported by Flash).
Where possible, text is analyzed in order to retrieve the data needed for search engine detection, and then exported as a series of image files.
All uploaded files are stored online in their original format, so you can (...) -
Demande de création d’un canal
12 mars 2010, par — En fonction de la configuration de la plateforme, l’utilisateur peut avoir à sa disposition deux méthodes différentes de demande de création de canal. La première est au moment de son inscription, la seconde, après son inscription en remplissant un formulaire de demande.
Les deux manières demandent les mêmes choses et fonctionnent à peu près de la même manière : le futur utilisateur doit remplir une série de champs de formulaire permettant tout d’abord aux administrateurs d’avoir des informations quant à (...) -
Installation en mode ferme
4 février 2011, par — Le mode ferme permet d’héberger plusieurs sites de type MediaSPIP en n’installant qu’une seule fois son noyau fonctionnel.
C’est la méthode que nous utilisons sur cette même plateforme.
L’utilisation en mode ferme nécessite de connaître un peu le mécanisme de SPIP contrairement à la version standalone qui ne nécessite pas réellement de connaissances spécifiques puisque l’espace privé habituel de SPIP n’est plus utilisé.
Dans un premier temps, vous devez avoir installé les mêmes fichiers que l’installation (...)
Sur d’autres sites (4489)
-
How can I make my loop command for a Python discord bot work?
17 octobre 2022, par Dioso — I am kinda new to Python and wanted to make a Discord music bot. It works pretty well, except that I am not able to figure out how to code the queue loop command and the "playing now" command (this one gives me a full link instead of just the name). I have bolded the 2 commands that I cannot figure out.


from ast import alias
import discord
from discord.ext import commands

from youtube_dl import YoutubeDL

class music_cog(commands.Cog):
 def __init__(self, bot):
     """Create the music cog with an empty queue and idle playback state."""
     self.bot = bot

     # Playback state flags and the title of the track currently playing.
     self.is_playing = False
     self.is_paused = False
     self.is_loop = False
     self.now_playing = ""

     # Queue of [song_info, voice_channel] pairs (2d array).
     self.music_queue = []

     # Extraction options for youtube-dl and streaming options for ffmpeg.
     self.YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist': 'True'}
     self.FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}

     # Active voice client; stays None until the bot joins a channel.
     self.vc = None

 # Searching the item on YouTube.
 def search_yt(self, item):
     """Search YouTube for *item*; return {'source': url, 'title': title},
     or False when extraction fails or no entries come back."""
     with YoutubeDL(self.YDL_OPTIONS) as ydl:
         try:
             hits = ydl.extract_info("ytsearch:%s" % item, download=False)['entries']
             best = hits[0]
         except Exception:
             return False
     return {'source': best['formats'][0]['url'], 'title': best['title']}

 def play_next(self):
     """After-playback callback: pop the next queued song and stream it,
     or flip is_playing off when the queue is empty."""
     if not self.music_queue:
         self.is_playing = False
         return

     self.is_playing = True
     # Remove the head entry now that we are about to play it.
     entry = self.music_queue.pop(0)
     self.now_playing = entry[0]['title']
     stream_url = entry[0]['source']
     self.vc.play(discord.FFmpegPCMAudio(stream_url, **self.FFMPEG_OPTIONS),
                  after=lambda e: self.play_next())

 # Kick off playback of the queue head (called when playback is idle).
 async def play_music(self, ctx):
     """Connect/move the voice client as needed and start the first queued
     song; sets is_playing/now_playing accordingly."""
     if len(self.music_queue) > 0:
         self.is_playing = True
         m_url = self.music_queue[0][0]['source']

         # Try to connect to the voice channel if not already connected.
         if self.vc is None or not self.vc.is_connected():
             self.vc = await self.music_queue[0][1].connect()

             # In case we fail to connect.
             if self.vc is None:
                 await ctx.send("Could not connect to the voice channel")
                 return
         else:
             await self.vc.move_to(self.music_queue[0][1])

         # BUG FIX: store the song *title*, not the stream URL, so the
         # "now playing" display shows the name (matches play_next()).
         self.now_playing = self.music_queue[0][0]['title']
         # Remove the first element as you are currently playing it.
         self.music_queue.pop(0)

         self.vc.play(discord.FFmpegPCMAudio(m_url, **self.FFMPEG_OPTIONS),
                      after=lambda e: self.play_next())
     else:
         self.is_playing = False

 @commands.command(name="play", aliases=["p","playing"], help="Plays a selected song from youtube")
 async def play(self, ctx, *args):
     """Queue a YouTube search result and start playback if idle."""
     query = " ".join(args)

     # BUG FIX: ctx.author.voice is None when the caller is not in a voice
     # channel, so dereferencing .channel first raised AttributeError
     # before the original None check could ever run.
     if ctx.author.voice is None:
         # You need to be connected so that the bot knows where to go.
         await ctx.send("Connect to a voice channel!")
         return
     voice_channel = ctx.author.voice.channel

     if self.is_paused:
         self.vc.resume()
         return

     # search_yt returns False on failure, otherwise a {'source','title'}
     # dict.  (The former `global song` was unnecessary and leaked state.)
     song = self.search_yt(query)
     if song is False:
         await ctx.send("Could not download the song. Incorrect format try another keyword. This could be due to playlist or a livestream format.")
     else:
         await ctx.send("Song added to the queue")
         self.music_queue.append([song, voice_channel])

         if not self.is_playing:
             await self.play_music(ctx)

 @commands.command(name="pause", help="Pauses the current song being played")
 async def pause(self, ctx, *args):
     """Toggle pause: pause when playing, resume when already paused."""
     if self.is_playing:
         self.is_playing, self.is_paused = False, True
         self.vc.pause()
         await ctx.send("Music paused")
     elif self.is_paused:
         self.is_paused, self.is_playing = False, True
         self.vc.resume()
         await ctx.send("Music resumed")

 @commands.command(name = "resume", aliases=["r"], help="Resumes playing with the discord bot")
 async def resume(self, ctx, *args):
     """Resume paused playback, or report that nothing is paused."""
     if not self.is_paused:
         await ctx.send("Music is not paused")
         return
     self.is_paused = False
     self.is_playing = True
     self.vc.resume()
     await ctx.send("Music resumed")

 @commands.command(name="skip", aliases=["s"], help="Skips the current song being played")
 async def skip(self, ctx):
     """Stop the current song and advance to the next one in the queue."""
     # FIX: `self.vc != None and self.vc` was redundant — the identity
     # check is the idiomatic (and sufficient) guard.
     if self.vc is not None:
         self.vc.stop()
         # Try to play next in the queue if it exists.
         await self.play_music(ctx)
         await ctx.send("Skipped current song")

 @commands.command(name="queue", aliases=["q"], help="Displays the current songs in queue")
 async def queue(self, ctx):
     """Send the numbered list of queued song titles in a code block,
     or a notice when the queue is empty."""
     # FIX: build the reply in a local variable instead of `global retval`,
     # which leaked mutable state across command invocations.
     if not self.music_queue:
         await ctx.send("No music in queue")
         return

     lines = [f"{i + 1}. {entry[0]['title']}"
              for i, entry in enumerate(self.music_queue)]
     await ctx.send("```" + "\n".join(lines) + "\n```")

 **@commands.command(name="loop", help="Loops the queue")**
 async def loop(self, ctx):
 if self.is_loop == False:
 self.is_loop = True
 else:
 self.is_loop = False 
 if self.is_loop == True:
 if len(self.music_queue)==0:
 await ctx.send("No music to loop")
 else:
 i=0
 while self.is_loop == True and i code>


-
Turn off sw_scale conversion to planar YUV 32 byte alignment requirements
8 novembre 2022, par flansel — I am experiencing artifacts on the right edge of scaled and converted images when converting into planar YUV pixel formats with sw_scale. I am reasonably sure (although I cannot find it anywhere in the documentation) that this is because sw_scale is using an optimization for 32-byte-aligned lines in the destination. However, I would like to turn this off because I am using sw_scale for image composition, so even though the destination lines may be 32-byte aligned, the output image may not be.


Example.


Full output frame is 1280x720 yuv422p10le. (this is 32 byte aligned)
However into the top left corner I am scaling an image with an outwidth of 1280 / 3 = 426.
426 in this format is not 32 byte aligned, but I believe sw_scale sees that the output linesize is 32 byte aligned and overwrites the width of 426 putting garbage in the next 22 bytes of data thinking this is simply padding when in my case this is displayable area.


This is why I need to actually disable this optimization or somehow trick sw_scale into believing it does not apply while keeping intact the way the program works, which is otherwise fine.


I have tried adding extra padding to the destination lines so they are no longer 32 byte aligned,
this did not help as far as I can tell.


Edit with code Example. Rendering omitted for ease of use.
Also here is a similar issue, unfortunately as I stated there fix will not work for my use case. https://github.com/obsproject/obs-studio/pull/2836


Use the commented line of code to swap between an output width which is and isn't 32-byte aligned.


#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixelutils.h"
#include "libavutil/pixfmt.h"
#include "libavutil/pixdesc.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv) {

    /* Set up a 1280x720 window, and an item with 1/3 the width and height
     * of the window. */
    int window_width = 1280;
    int window_height = 720;
    int item_width = window_width / 3;
    int item_height = window_height / 3;

    int item_out_width = item_width;
    /* This line forces the item width to be 32-byte aligned; uncomment it
     * to see uncorrupted results.  Note %16 because outformat has 2 bytes
     * per component. */
    /* item_out_width -= (item_width % 16); */

    enum AVPixelFormat outformat = AV_PIX_FMT_YUV422P10LE;
    enum AVPixelFormat informat = AV_PIX_FMT_UYVY422;

    int window_lines[4] = {0};
    av_image_fill_linesizes(window_lines, outformat, window_width);

    uint8_t *window_planes[4] = {0};
    window_planes[0] = calloc(1, window_lines[0] * window_height);
    window_planes[1] = calloc(1, window_lines[1] * window_height);
    window_planes[2] = calloc(1, window_lines[2] * window_height); /* all zeros — green in YUV */

    int item_lines[4] = {0};
    av_image_fill_linesizes(item_lines, informat, item_width);

    uint8_t *item_planes[4] = {0};
    item_planes[0] = malloc(item_lines[0] * item_height);

    /* FIX: bail out on allocation failure instead of dereferencing NULL. */
    if (!window_planes[0] || !window_planes[1] || !window_planes[2] || !item_planes[0]) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }
    memset(item_planes[0], 100, item_lines[0] * item_height);

    struct SwsContext *ctx;
    ctx = sws_getContext(item_width, item_height, informat,
        item_out_width, item_height, outformat, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    /* FIX: sws_getContext returns NULL for unsupported conversions. */
    if (!ctx) {
        fprintf(stderr, "sws_getContext failed\n");
        return 1;
    }

    /* Check a block in the normal region. */
    printf("Pre scale normal region %d %d %d\n", (int)((uint16_t*)window_planes[0])[0], (int)((uint16_t*)window_planes[1])[0],
        (int)((uint16_t*)window_planes[2])[0]);

    /* Check a block in the corrupted region (should be all zeros).
     * These offsets lie just past the converted area. */
    int corrupt_offset_y = (item_out_width + 3) * 2; /* (item_out_width + 3) * 2 bytes per component, Y plane */
    int corrupt_offset_uv = (item_out_width + 3);    /* half of Y: horizontally subsampled U and V planes */

    printf("Pre scale corrupted region %d %d %d\n", (int)(*((uint16_t*)(window_planes[0] + corrupt_offset_y))),
        (int)(*((uint16_t*)(window_planes[1] + corrupt_offset_uv))), (int)(*((uint16_t*)(window_planes[2] + corrupt_offset_uv))));

    sws_scale(ctx, (const uint8_t**)item_planes, item_lines, 0, item_height, window_planes, window_lines);

    /* Perform the same tests after scaling. */
    printf("Post scale normal region %d %d %d\n", (int)((uint16_t*)window_planes[0])[0], (int)((uint16_t*)window_planes[1])[0],
        (int)((uint16_t*)window_planes[2])[0]);
    printf("Post scale corrupted region %d %d %d\n", (int)(*((uint16_t*)(window_planes[0] + corrupt_offset_y))),
        (int)(*((uint16_t*)(window_planes[1] + corrupt_offset_uv))), (int)(*((uint16_t*)(window_planes[2] + corrupt_offset_uv))));

    /* FIX: release resources that the original leaked. */
    sws_freeContext(ctx);
    free(window_planes[0]);
    free(window_planes[1]);
    free(window_planes[2]);
    free(item_planes[0]);

    return 0;
}


Example Output:

//No alignment
Pre scale normal region 0 0 0
Pre scale corrupted region 0 0 0
Post scale normal region 400 400 400
Post scale corrupted region 512 36865 36865

//With alignment
Pre scale normal region 0 0 0
Pre scale corrupted region 0 0 0
Post scale normal region 400 400 400
Post scale corrupted region 0 0 0



-
Streaming audio from FFMPEG to browser via WebSocket and WebAudioApi
17 novembre 2022, par Sebi O. — My project has 2 parts:


- 

- a web interface that the user accesses
- and a standalone app installed on the computer, that acts as a websocket server.






From the web UI, the user has to hear his computer's microphone.
At this moment, I have a working solution that listens to microphone and sends the raw PCM audio chunks back to web-UI which is able to play them. But some serious lag gets added in time, despite it all runs on the same computer, so there's no internet latency/etc. That is why I am testing FFMPEG now.


So, here's the FFMPEG command for streaming microphone data :


ffmpeg.exe -re -f dshow -i audio="Microphone (HD Pro Webcam C920)" -ar 44100 -ac 1 -f f32le pipe:1



Data gets sent successfully via websocket, but playing it using WebAudioApi is not working, i mean i don't hear anything.


Can anyone point me to what am I doing wrong ?
Here's the web javascript :


// Host of the standalone websocket audio-server (same machine as the browser).
let ipOfAudioServer = 'localhost';

// Active WebSocket client; null until a connection is established.
let wsClient = null;

// Web Audio state: the AudioContext, the read position into the chunk
// queue, the queue of received ArrayBuffer chunks, and the current
// AudioBufferSourceNode.
var audioCtx = null;
var subcounter = 0;
var audiobuffer = [];
var source = null;

// Must match the values in the audio-server.  Although the audio-server
// could send 2 channels, we reduce to only one to save bandwidth.
var sampleRate = 44100;
var channels = 1;

// Device name forwarded to the audio-server's startMicrophone command.
var microphone = 'Microphone (HD Pro Webcam C920)';
 
 
 // Reads the chunk at the current position in audiobuffer and plays it;
 // re-calls itself via source.onended to play the next item in the queue.
 this.play = function(soundName) {

     var chunk = audiobuffer[subcounter];

     if (chunk) {
         // BUG FIX: the websocket delivers an ArrayBuffer of raw f32le PCM.
         // It must be wrapped in a Float32Array before copying into the
         // AudioBuffer — `set(arrayBuffer)` copies nothing (hence silence) —
         // and frameCount is the number of float samples (byteLength / 4),
         // not the byte length.
         var samples = new Float32Array(chunk);
         var frameCount = samples.length;
         console.log(frameCount, audiobuffer.length);

         var myAudioBuffer = audioCtx.createBuffer(channels, frameCount, sampleRate);
         myAudioBuffer.getChannelData(0).set(samples);

         if (myAudioBuffer != null)
         {
             subcounter += 1;

             source = audioCtx.createBufferSource();
             source.buffer = myAudioBuffer;
             source.connect(audioCtx.destination);
             source.onended = () => { console.log("finished, continuing to seek buffer!"); play(soundName); };
             source.start();
         }
     }
     // just in case the counter got to be bigger than the actual amount of items in the list, set it back to last one
     if (subcounter > audiobuffer.length)
         subcounter = audiobuffer.length;
 };
 
 
 // Lazily create the WS client connected to the audio-server; incoming
 // binary frames are queued for playback, text frames are control replies.
 this.initWebsocketClient = function ()
 {
     if (wsClient != null)
         return;

     wsClient = new WebSocket(`ws://${ipOfAudioServer}:23233`, "protocol");
     wsClient.binaryType = "arraybuffer";

     wsClient.onmessage = function (event)
     {
         if (typeof event.data === 'object') {
             console.log(event.data, event.data.size);

             // clear memory in case buffer is already too big
             if (subcounter > 50) {
                 console.log('cleared memory');
                 audiobuffer = [];
                 subcounter = 0;
             }

             audiobuffer.push(event.data);
             // First chunk received: kick off playback.
             if (audiobuffer.length == 1) {
                 play('sinewave');
             }
         }
         else {
             // Text frame: server acknowledged stop — tear down and reset.
             if (event.data == 'stopMicrophone=ok') {
                 wsClient.close();
                 wsClient = null;

                 audiobuffer = [];
                 subcounter = 0;
             }

             console.log(event.data);
         }
     };
 };

 // Used by send(): runs `callback` only once the socket is OPEN,
 // re-checking every `interval` milliseconds until then.
 this.waitForConnection = function (callback, interval) {
     if (wsClient.readyState !== 1) {
         var self = this;
         // optional: implement backoff for interval here
         setTimeout(function () {
             self.waitForConnection(callback, interval);
         }, interval);
         return;
     }
     callback();
 };
 
 // Send a WS message to the audio-server, connecting first if necessary;
 // `callback` (optional) runs after the message goes out.
 this.send = function (message, callback)
 {
     this.initWebsocketClient();

     var deliver = function () {
         wsClient.send(message);
         if (callback !== undefined) {
             callback();
         }
     };
     this.waitForConnection(deliver, 1000);
 };
 
 // Start-button handler: create the AudioContext on first use, reset the
 // playback queue, and ask the audio-server to stream the microphone.
 function startCapture() {
     if (audioCtx == null) {
         var Ctor = window.AudioContext || window.webkitAudioContext;
         audioCtx = new Ctor();
     }

     audiobuffer = [];
     subcounter = 0;

     this.send(`startMicrophone?device=${microphone}`);
 }

 // called by clicking the stop button
 function stopCapture() {
     // The server replies 'stopMicrophone=ok'; the onmessage handler then
     // closes the socket and resets the playback queue.
     this.send('stopMicrophone');
 }