
Recherche avancée
Médias (91)
-
Collections - Formulaire de création rapide
19 février 2013, par
Mis à jour : Février 2013
Langue : français
Type : Image
-
Les Miserables
4 juin 2012, par
Mis à jour : Février 2013
Langue : English
Type : Texte
-
Ne pas afficher certaines informations : page d’accueil
23 novembre 2011, par
Mis à jour : Novembre 2011
Langue : français
Type : Image
-
The Great Big Beautiful Tomorrow
28 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Texte
-
Richard Stallman et la révolution du logiciel libre - Une biographie autorisée (version epub)
28 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Texte
-
Rennes Emotion Map 2010-11
19 octobre 2011, par
Mis à jour : Juillet 2013
Langue : français
Type : Texte
Autres articles (47)
-
Configuration spécifique pour PHP5
4 février 2011, par — PHP5 est obligatoire, vous pouvez l’installer en suivant ce tutoriel spécifique.
Il est recommandé dans un premier temps de désactiver le safe_mode, cependant, s’il est correctement configuré et que les binaires nécessaires sont accessibles, MediaSPIP devrait fonctionner correctement avec le safe_mode activé.
Modules spécifiques
Il est nécessaire d’installer certains modules PHP spécifiques, via le gestionnaire de paquet de votre distribution ou manuellement : php5-mysql pour la connectivité avec la (...) -
Emballe médias : à quoi cela sert ?
4 février 2011, parCe plugin vise à gérer des sites de mise en ligne de documents de tous types.
Il crée des "médias", à savoir : un "média" est un article au sens SPIP créé automatiquement lors du téléversement d’un document qu’il soit audio, vidéo, image ou textuel ; un seul document ne peut être lié à un article dit "média" ; -
Keeping control of your media in your hands
13 avril 2011, parThe vocabulary used on this site and around MediaSPIP in general, aims to avoid reference to Web 2.0 and the companies that profit from media-sharing.
While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
MediaSPIP aims to be accessible to as many people as possible and development is based on expanding the (...)
Sur d’autres sites (4900)
-
CUDA_ERROR_INVALID_CONTEXT
15 août 2021, par Meme Machine — I am making a desktop sharing application based off of these repositories from NVIDIA.


https://github.com/NVIDIA/video-sdk-samples/tree/master/nvEncDXGIOutputDuplicationSample


https://github.com/NVIDIA/video-sdk-samples/blob/master/Samples/AppDecode/AppDecD3D/


https://github.com/NVIDIA/video-sdk-samples/tree/master/Samples/AppDecode/AppDecMem


I intend to have a setup function that is called once when Remote Desktop is selected, and then a second function that actually displays the received frames which is called when a frame is received


The below functions are nearly identical to the main() and NvD3D() functions found in AppDecD3D and AppDecMem repositories


CUcontext cuContext = NULL; // maybe it has to do with this variable?

int setup()
{
 char szInFilePath[256] = "C:\\Users\\Admin\\Desktop\\test.h264";
 int iGpu = 0;
 int iD3d = 0;
 try
 {
 //ParseCommandLine(argc, argv, szInFilePath, NULL, iGpu, NULL, &iD3d);
 CheckInputFile(szInFilePath);

 ck(cuInit(0));
 int nGpu = 0;
 ck(cuDeviceGetCount(&nGpu));
 if (iGpu < 0 || iGpu >= nGpu)
 {
 std::ostringstream err;
 err << "GPU ordinal out of range. Should be within [" << 0 << ", " << nGpu - 1 << "]" << std::endl;
 throw std::invalid_argument(err.str());
 }
 CUdevice cuDevice = 0;
 ck(cuDeviceGet(&cuDevice, iGpu));
 char szDeviceName[80];
 ck(cuDeviceGetName(szDeviceName, sizeof(szDeviceName), cuDevice));
 std::cout << "GPU in use: " << szDeviceName << std::endl;

 ck(cuCtxCreate(&cuContext, CU_CTX_SCHED_BLOCKING_SYNC, cuDevice));
 //NvDecD3D<framepresenterd3d11>(szInFilePath);

 std::cout << "Display with D3D11." << std::endl;
 }
 catch (const std::exception& ex)
 {
 std::cout << ex.what();
 exit(1);
 }
 return 0;
}

template<class typename="std::enable_if<std::is_base_of<FramePresenterD3D," framepresentertype="framepresentertype">::value>>
int NvDecD3D(char* szInFilePath)
{
 FileDataProvider dp(szInFilePath);
 FFmpegDemuxer demuxer(&dp);
 NvDecoder dec(cuContext, demuxer.GetWidth(), demuxer.GetHeight(), true, FFmpeg2NvCodecId(demuxer.GetVideoCodec()));
 FramePresenterType presenter(cuContext, demuxer.GetWidth(), demuxer.GetHeight());
 CUdeviceptr dpFrame = 0;
 ck(cuMemAlloc(&dpFrame, demuxer.GetWidth() * demuxer.GetHeight() * 4));
 int nVideoBytes = 0, nFrameReturned = 0, nFrame = 0;
 uint8_t* pVideo = NULL, ** ppFrame;

 do
 {
 demuxer.Demux(&pVideo, &nVideoBytes);
 dec.Decode(pVideo, nVideoBytes, &ppFrame, &nFrameReturned);
 if (!nFrame && nFrameReturned)
 LOG(INFO) << dec.GetVideoInfo();

 for (int i = 0; i < nFrameReturned; i++)
 {
 if (dec.GetBitDepth() == 8)
 Nv12ToBgra32((uint8_t*)ppFrame[i], dec.GetWidth(), (uint8_t*)dpFrame, 4 * dec.GetWidth(), dec.GetWidth(), dec.GetHeight());
 else
 P016ToBgra32((uint8_t*)ppFrame[i], 2 * dec.GetWidth(), (uint8_t*)dpFrame, 4 * dec.GetWidth(), dec.GetWidth(), dec.GetHeight());
 presenter.PresentDeviceFrame((uint8_t*)dpFrame, demuxer.GetWidth() * 4);
 }
 nFrame += nFrameReturned;
 } while (nVideoBytes);
 ck(cuMemFree(dpFrame));
 std::cout << "Total frame decoded: " << nFrame << std::endl;
 return 0;
}
</class></framepresenterd3d11>


Notice the line `NvDecD3D<FramePresenterD3D11>(szInFilePath);`? I plan to call NvDecD3D() when a frame is received. So, I commented out the call in setup() and moved it to my asio::async_read function (see below).

void do_read_body()
 {
 readBuffer.reserve(_read_msg.ReadLength);
 _read_msg.Body = readBuffer.data();
 auto self(shared_from_this());
 asio::async_read(_socket,
 asio::buffer(_read_msg.Body, _read_msg.ReadLength),
 [this, self](std::error_code ec, std::size_t /*length*/)
 {
 if (!ec)
 {
 if (_read_msg.CmdId == 0x5)
 {
 std::cout << "Received a frame" << std::endl;

 NvDecD3D<framepresenterd3d11>(szInFilePath);
 }
 else
 {
 std::cout << std::string(_read_msg.Body, 0, _read_msg.ReadLength) << std::endl;
 }
 
 do_read_header();
 }
 else
 {
 _room.leave(shared_from_this());
 }
 });
 }
</framepresenterd3d11>


However, when I go to execute it, I get CUDA_ERROR_INVALID_CONTEXT when cuMemAlloc() is called. If I uncomment the call to NvDecD3D() inside setup() and call it from there, it does not error, however.

Do you have any idea what could be causing this problem ? Perhaps it is related to the ASIO.


-
Fighting with the VP8 Spec
4 juin 2010, par Multimedia Mike — VP8 — As stated in a previous blog post on the matter, FFmpeg’s policy is to reimplement codecs rather than adopt other codebases wholesale. And so it is with Google’s recently open sourced VP8 codec, the video portion of their Webm initiative. I happen to know that the new FFmpeg implementation is in the capable hands of several of my co-developers so I’m not even worrying about that angle.
Instead, I thought of another of my characteristically useless exercises : Create an independent VP8 decoder implementation entirely in pure Python. Silly ? Perhaps. But it has one very practical application : By attempting to write a new decoder based on the official bitstream documentation, this could serve as a mechanism for validating said spec, something near and dear to my heart.
What is the current state of the spec ? Let me reiterate that I’m glad it exists. As I stated during the initial open sourcing event, everything that Google produced for the initial event went well beyond my wildest expectations. Having said that, the documentation does fall short in a number of places. Fortunately, I am on the Webm mailing lists and am sending in corrections and ideas for general improvement. For the most part, I have been able to understand the general ideas behind the decoding flow based on the spec and am even able to implement certain pieces correctly. Then I usually instrument the libvpx source code with output statements in order to validate that I’m doing everything right.
Token Blocker
Unfortunately, I’m quite blocked right now on the chapter regarding token/DCT coefficient decoding (chapter 13 in the current document iteration). In his seminal critique of the codec, Dark Shikari complained that large segments of the spec are just C code fragments copy and pasted from the official production decoder. As annoying as that is, the biggest insult comes at the end of section 13.3 :While we have in fact completely described the coefficient decoding procedure, the reader will probably find it helpful to consult the reference implementation, which can be found in the file detokenize.c.
The reader most certainly will not find it helpful to consult the file detokenize.c. The file in question implements the coefficient residual decoding with an unholy sequence of C macros that contain goto statements. Honestly, I thought I did understand the coefficient decoding procedure based on the spec’s description. But my numbers don’t match up with the official decoder. Instrumenting or tracing macro’d code is obviously painful and studying the same code is making me think I don’t understand the procedure after all. To be fair, entropy decoding often occupies a lot of CPU time for many video decoders and I have little doubt that the macro/goto approach is much faster than clearer, more readable methods. It’s just highly inappropriate to refer to it for pedagogical purposes.
Aside : For comparison, check out the reference implementation for the VC-1 codec. It was written so clearly and naively that the implementors used an O(n) Huffman decoder. That’s commitment to clarity.
I wonder if my FFmpeg cohorts are having better luck with the DCT residue decoding in their new libavcodec implementation ? Maybe if I can get this Python decoder working, it can serve as a more appropriate reference decoder.
Update : Almost immediately after I posted this entry, I figured out a big problem that was holding me back, and then several more small ones, and finally decoded my first correct DCT coefficient from the stream (I’ve never been so happy to see the number -448). I might be back on track now. Even better was realizing that my original understanding of the spec was correct.
Unrelated
I found this image on the Doom9 forums. I ROFL’d :
It’s probably unfair and inaccurate but you have to admit it’s funny. Luckily, quality nitpickings aren’t my department. I’m just interested in getting codecs working, tested, and documented so that more people can use them reliably.
-
How can I make my loop command for a python discord bot work?
17 octobre 2022, par Dioso I am kinda new to python and wanted to make a discord music bot. It works pretty well, except that I am not able to figure out how to code the queue loop command and the playing now command(this one gives me a full link instead of just the name). I have bolded the 2 commands that I cannot figure out.


from ast import alias
import discord
from discord.ext import commands

from youtube_dl import YoutubeDL

class music_cog(commands.Cog):
 def __init__(self, bot):
     """Set up playback state, the song queue, and downloader/ffmpeg options."""
     # Reference to the running bot instance.
     self.bot = bot

     # Playback state flags.
     self.is_playing = False
     self.is_paused = False
     self.is_loop = False
     self.now_playing = ""

     # Queue of [song_info, voice_channel] pairs awaiting playback.
     self.music_queue = []

     # Downloader and ffmpeg stream options.
     self.YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist': 'True'}
     self.FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}

     # Active voice client (None until the bot joins a channel).
     self.vc = None

 # Search YouTube for the requested item.
 def search_yt(self, item):
     """Return {'source': stream_url, 'title': title} for the top YouTube
     search hit, or False if extraction fails."""
     with YoutubeDL(self.YDL_OPTIONS) as ydl:
         try:
             top_hit = ydl.extract_info("ytsearch:%s" % item, download=False)['entries'][0]
         except Exception:
             return False
         return {'source': top_hit['formats'][0]['url'], 'title': top_hit['title']}

 def play_next(self):
     """Callback fired when a track ends: start the next queued song, if any."""
     if len(self.music_queue) == 0:
         self.is_playing = False
         return

     self.is_playing = True

     # Pop the queue head and remember its metadata before playing it.
     head = self.music_queue.pop(0)
     m_url = head[0]['source']
     self.now_playing = head[0]['title']

     self.vc.play(discord.FFmpegPCMAudio(m_url, **self.FFMPEG_OPTIONS), after=lambda e: self.play_next())

 # Start (or continue) playback of the queue head.
 async def play_music(self, ctx):
     """Connect to voice if needed and play the first queued song.

     BUG FIX: ``now_playing`` was set to ``m_url`` (the raw stream link),
     which made the "now playing" display show a full URL; it now stores
     the track title, matching play_next().
     """
     if len(self.music_queue) > 0:
         self.is_playing = True
         m_url = self.music_queue[0][0]['source']

         # Try to connect to the requester's channel if not already connected.
         if self.vc == None or not self.vc.is_connected():
             self.vc = await self.music_queue[0][1].connect()

             # In case we fail to connect.
             if self.vc == None:
                 await ctx.send("Could not connect to the voice channel")
                 return
         else:
             await self.vc.move_to(self.music_queue[0][1])

         # Store the human-readable title (was m_url — see docstring).
         self.now_playing = self.music_queue[0][0]['title']
         # Remove the first element as we are about to play it.
         self.music_queue.pop(0)

         self.vc.play(discord.FFmpegPCMAudio(m_url, **self.FFMPEG_OPTIONS), after=lambda e: self.play_next())
     else:
         self.is_playing = False

 @commands.command(name="play", aliases=["p","playing"], help="Plays a selected song from youtube")
 async def play(self, ctx, *args):
     """Queue the top YouTube hit for the given search terms; start playback if idle.

     BUG FIX: if the author is not in voice at all, ``ctx.author.voice`` is
     None and ``.channel`` raised AttributeError before the check could run;
     the guard now tests ``ctx.author.voice`` itself.
     """
     query = " ".join(args)

     if ctx.author.voice is None:
         # You need to be connected so that the bot knows where to go.
         await ctx.send("Connect to a voice channel!")
         return
     voice_channel = ctx.author.voice.channel

     if self.is_paused:
         self.vc.resume()
     else:
         # NOTE(review): `song` is deliberately module-global — other
         # commands in this file may rely on it; kept for compatibility.
         global song
         song = self.search_yt(query)
         # search_yt returns False (a bool) on failure, a dict on success.
         if song is False:
             await ctx.send("Could not download the song. Incorrect format try another keyword. This could be due to playlist or a livestream format.")
         else:
             await ctx.send("Song added to the queue")
             self.music_queue.append([song, voice_channel])

             if self.is_playing == False:
                 await self.play_music(ctx)

 @commands.command(name="pause", help="Pauses the current song being played")
 async def pause(self, ctx, *args):
     """Toggle pause: pause when playing, resume when already paused."""
     if self.is_playing:
         # Playing -> paused.
         self.is_playing, self.is_paused = False, True
         self.vc.pause()
         await ctx.send("Music paused")
     elif self.is_paused:
         # Paused -> playing again.
         self.is_paused, self.is_playing = False, True
         self.vc.resume()
         await ctx.send("Music resumed")

 @commands.command(name = "resume", aliases=["r"], help="Resumes playing with the discord bot")
 async def resume(self, ctx, *args):
     """Resume playback if paused; otherwise tell the user nothing is paused."""
     if not self.is_paused:
         await ctx.send("Music is not paused")
         return
     # Flip state back to playing and resume the voice client.
     self.is_paused = False
     self.is_playing = True
     self.vc.resume()
     await ctx.send("Music resumed")

 @commands.command(name="skip", aliases=["s"], help="Skips the current song being played")
 async def skip(self, ctx):
     """Stop the current track and advance to the next queued song.

     IDIOM FIX: ``self.vc != None and self.vc`` was redundant — a single
     identity check suffices.
     """
     if self.vc is not None:
         self.vc.stop()
         # Try to play next in the queue if it exists.
         # NOTE(review): stop() also fires the play `after` callback
         # (play_next), so this may double-advance the queue — verify
         # against the voice client's callback behaviour.
         await self.play_music(ctx)
         await ctx.send("Skipped current song")

 @commands.command(name="queue", aliases=["q"], help="Displays the current songs in queue")
 async def queue(self, ctx):
     """Send the queued song titles as a numbered Discord code block."""
     # NOTE(review): `retval` is deliberately module-global — other commands
     # in this file may rely on it; kept for compatibility.
     global retval
     retval = "```"
     # IDIOM FIX: enumerate replaces manual index arithmetic.
     for pos, entry in enumerate(self.music_queue, start=1):
         retval += str(pos) + ". " + entry[0]['title'] + "\n"

     if retval != "```":
         retval += "```"
         await ctx.send(retval)
     else:
         await ctx.send("No music in queue")

 **@commands.command(name="loop", help="Loops the queue")**
 async def loop(self, ctx):
 if self.is_loop == False:
 self.is_loop = True
 else:
 self.is_loop = False 
 if self.is_loop == True:
 if len(self.music_queue)==0:
 await ctx.send("No music to loop")
 else:
 i=0
 while self.is_loop == True and i code>