
Media (91)
-
Les Miserables
9 December 2019
Updated: December 2019
Language: French
Type: Text
-
VideoHandle
8 November 2019
Updated: November 2019
Language: French
Type: Video
-
Somos millones 1
21 July 2014
Updated: June 2015
Language: French
Type: Video
-
Un test - mauritanie
3 April 2014
Updated: April 2014
Language: French
Type: Text
-
Pourquoi Obama lit il mes mails ?
4 February 2014
Updated: February 2014
Language: French
-
IMG 0222
6 October 2013
Updated: October 2013
Language: French
Type: Image
Other articles (53)
-
Sites built with MediaSPIP
2 May 2011 — This page presents a few of the sites running MediaSPIP.
You can of course add your own using the form at the bottom of the page. -
HTML5 audio and video support
13 April 2011 — MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers, the Flowplayer Flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...) -
From upload to the final video [standalone version]
31 January 2010 — The path of an audio or video document through SPIPMotion is divided into three distinct stages.
Upload and retrieval of information about the source video
First, a SPIP article has to be created and the "source" video document attached to it.
When this document is attached to the article, two actions are carried out in addition to the normal behaviour: retrieval of the technical information of the file's audio and video streams; generation of a thumbnail: extraction of a (...)
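For illustration only, a guess at how those two extra actions are commonly done with the ffmpeg tool set (this is an assumption; the text above does not say what SPIPMotion actually runs, and source.mp4, thumbnail.jpg and the 5-second mark are made-up values):

# Retrieve the technical information of the file's audio and video streams
ffprobe -v error -show_streams -of json source.mp4

# Generate a thumbnail by extracting a single frame
ffmpeg -ss 5 -i source.mp4 -vframes 1 thumbnail.jpg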
On other sites (7457)
-
How to encode Planar 4:2:0 (fourcc P010)
20 July 2021, by DennisFleurbaaij — I'm trying to recode fourcc V210 (which is a packed YUV 4:2:2 format) into P010 (planar YUV 4:2:0). I think I've implemented it according to spec, but the renderer is giving a wrong image, so something is off. There is a decent example of decoding V210 in ffmpeg (the defines below are adapted from their solution), but I can't find a P010 encoder to see what I'm doing wrong.


(Yes, I've tried ffmpeg and that works, but it's too slow for this; it takes 30 ms per frame on an Intel Gen11 i7.)


Clarification (after @Frank's question): the frames being processed are 4K (3840 px wide), hence there is no code for handling the 128-byte line alignment.


This is running on Intel, so little-endian conversions apply.
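For reference, a recap of the V210 layout (added here, not part of the original post): each group of four little-endian 32-bit words packs six pixels, with three 10-bit components per word in bits 0-9, 10-19 and 20-29, in the order Cb Y0 Cr / Y1 Cb Y2 / Cr Y3 Cb / Y4 Cr Y5. This is exactly the read pattern the macros below follow.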


Try 1 - all green image:


The following code:


#define V210_READ_PACK_BLOCK(a, b, c) \
 do { \
 val = *src++; \
 a = val & 0x3FF; \
 b = (val >> 10) & 0x3FF; \
 c = (val >> 20) & 0x3FF; \
 } while (0)

#define PIXELS_PER_PACK 6
#define BYTES_PER_PACK (4*4)

void MyClass::FormatVideoFrame(
 BYTE* inFrame,
 BYTE* outBuffer)
{
 const uint32_t pixels = m_height * m_width;

 const uint32_t* src = (const uint32_t *)inFrame;

 uint16_t* dstY = (uint16_t *)outBuffer;

 uint16_t* dstUVStart = (uint16_t*)(outBuffer + ((ptrdiff_t)pixels * sizeof(uint16_t)));
 uint16_t* dstUV = dstUVStart;

 const uint32_t packsPerLine = m_width / PIXELS_PER_PACK;

 for (uint32_t line = 0; line < m_height; line++)
 {
 for (uint32_t pack = 0; pack < packsPerLine; pack++)
 {
 uint32_t val;
 uint16_t u, y1, y2, v;

 // Keep chroma on even packs, drop it on odd packs (4:2:2 -> 4:2:0 decimation attempt)
 if (pack % 2 == 0)
 {
 V210_READ_PACK_BLOCK(u, y1, v);
 *dstUV++ = u;
 *dstY++ = y1;
 *dstUV++ = v;

 V210_READ_PACK_BLOCK(y1, u, y2);
 *dstY++ = y1;
 *dstUV++ = u;
 *dstY++ = y2;

 V210_READ_PACK_BLOCK(v, y1, u);
 *dstUV++ = v;
 *dstY++ = y1;
 *dstUV++ = u;

 V210_READ_PACK_BLOCK(y1, v, y2);
 *dstY++ = y1;
 *dstUV++ = v;
 *dstY++ = y2;
 }
 else // odd pack: write luma only, discard chroma
 {
 V210_READ_PACK_BLOCK(u, y1, v);
 *dstY++ = y1;

 V210_READ_PACK_BLOCK(y1, u, y2);
 *dstY++ = y1;
 *dstY++ = y2;

 V210_READ_PACK_BLOCK(v, y1, u);
 *dstY++ = y1;

 V210_READ_PACK_BLOCK(y1, v, y2);
 *dstY++ = y1;
 *dstY++ = y2;
 }
 }
 }

#ifdef _DEBUG

 // Fully written Y space
 assert(dstY == dstUVStart);

 // Fully written UV space
 const BYTE* expectedCurrentUVPtr = outBuffer + (ptrdiff_t)GetOutFrameSize();
 assert(expectedCurrentUVPtr == (BYTE *)dstUV);

#endif
}

// This is called to determine outBuffer size
LONG MyClass::GetOutFrameSize() const
{
 const LONG pixels = m_height * m_width;

 return
 (pixels * sizeof(uint16_t)) + // One 16-bit Y value per pixel
 (pixels / 2 / 2 * (2 * sizeof(uint16_t))); // One 16-bit U and one V value per 2x2 pixel block
}
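As a quick sanity check of that size calculation (worked numbers added here for illustration): a 3840×2160 frame has 8,294,400 pixels, so the Y plane takes 8,294,400 × 2 = 16,588,800 bytes and the interleaved UV plane half that, 8,294,400 bytes, giving 24,883,200 bytes in total, an average of 3 bytes per pixel.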



This code produced an all-green image. The cause turned out to be a missing bit shift: per the P010 spec, each 10-bit value must be placed in the upper bits of its 16-bit word.


Try 2 - Y works, UV doubled?


I updated the code to properly (or so I think) shift the YUV values into the correct position within their 16-bit space.


#define V210_READ_PACK_BLOCK(a, b, c) \
 do { \
 val = *src++; \
 a = val & 0x3FF; \
 b = (val >> 10) & 0x3FF; \
 c = (val >> 20) & 0x3FF; \
 } while (0)


// Place the 10-bit value in the upper bits of the 16-bit word, per the P010 spec
#define P010_WRITE_VALUE(d, v) (*(d)++ = ((v) << 6))

#define PIXELS_PER_PACK 6
#define BYTES_PER_PACK (4 * sizeof(uint32_t))

// Snipped constructor here which guarantees that we're processing
// something which does not violate alignment.

void MyClass::FormatVideoFrame(
 const BYTE* inBuffer,
 BYTE* outBuffer)
{ 
 const uint32_t pixels = m_height * m_width;
 const uint32_t aligned_width = ((m_width + 47) / 48) * 48; // V210 pads lines to a multiple of 48 pixels
 const uint32_t stride = aligned_width * 8 / 3; // 16 bytes per 6 pixels, i.e. 128 bytes per 48 pixels

 uint16_t* dstY = (uint16_t *)outBuffer;

 uint16_t* dstUVStart = (uint16_t*)(outBuffer + ((ptrdiff_t)pixels * sizeof(uint16_t)));
 uint16_t* dstUV = dstUVStart;

 const uint32_t packsPerLine = m_width / PIXELS_PER_PACK;

 for (uint32_t line = 0; line < m_height; line++)
 {
 // Lines start at 128 byte alignment
 const uint32_t* src = (const uint32_t*)(inBuffer + (ptrdiff_t)(line * stride));

 for (uint32_t pack = 0; pack < packsPerLine; pack++)
 {
 uint32_t val;
 uint16_t u, y1, y2, v;

 if (pack % 2 == 0) // chroma alternates per pack here; this turns out to be the bug (see the fix below)
 {
 V210_READ_PACK_BLOCK(u, y1, v);
 P010_WRITE_VALUE(dstUV, u);
 P010_WRITE_VALUE(dstY, y1);
 P010_WRITE_VALUE(dstUV, v);

 V210_READ_PACK_BLOCK(y1, u, y2);
 P010_WRITE_VALUE(dstY, y1);
 P010_WRITE_VALUE(dstUV, u);
 P010_WRITE_VALUE(dstY, y2);

 V210_READ_PACK_BLOCK(v, y1, u);
 P010_WRITE_VALUE(dstUV, v);
 P010_WRITE_VALUE(dstY, y1);
 P010_WRITE_VALUE(dstUV, u);

 V210_READ_PACK_BLOCK(y1, v, y2);
 P010_WRITE_VALUE(dstY, y1);
 P010_WRITE_VALUE(dstUV, v);
 P010_WRITE_VALUE(dstY, y2);
 }
 else
 {
 V210_READ_PACK_BLOCK(u, y1, v);
 P010_WRITE_VALUE(dstY, y1);

 V210_READ_PACK_BLOCK(y1, u, y2);
 P010_WRITE_VALUE(dstY, y1);
 P010_WRITE_VALUE(dstY, y2);

 V210_READ_PACK_BLOCK(v, y1, u);
 P010_WRITE_VALUE(dstY, y1);

 V210_READ_PACK_BLOCK(y1, v, y2);
 P010_WRITE_VALUE(dstY, y1);
 P010_WRITE_VALUE(dstY, y2);
 }
 }
 }

#ifdef _DEBUG

 // Fully written Y space
 assert(dstY == dstUVStart);

 // Fully written UV space
 const BYTE* expectedCurrentUVPtr = outBuffer + (ptrdiff_t)GetOutFrameSize();
 assert(expectedCurrentUVPtr == (BYTE *)dstUV);

#endif
}



This gives a correct Y plane, and the right number of lines for U and V as well, but somehow U and V are not overlaid properly: there seem to be two copies of the image, mirrored through the vertical center line. Something similar but less visible happens when zeroing out V. So both are getting rendered at half the width? Any tips appreciated :)


Fix:
Found the bug: I was alternating the chroma writes per pack instead of per line. In 4:2:0 there is one UV pair per 2×2 block of pixels, so chroma must only be written on every other line.


if (pack % 2 == 0)



Should be


if (line % 2 == 0)



-
ffmpeg moving text drawtext
5 July 2021, by BOB — I'm using the ffmpeg library to draw text on a video at a specific time, and I've managed to do that. Now I need to move the text from one position to another, and I can't work out how, so can anyone suggest a way to do it?



I'm using this command to move text from top to bottom, but I can't determine how to set x and y so the text moves from one specific (x, y) to another:



ffmpeg -i VideoInput.mp4 -vf "drawtext=enable='between(t,12,14)':fontfile=myfont.otf:text='Test test':x=(w-text_w)/2:y=w/50\*mod(t\,2):fontsize=65" -acodec copy outputVideo.mp4
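For illustration, a hedged sketch of one way to do this (the start point (100, 50), end point (500, 300) and the 12-14 s window are made-up values, not from the question): since drawtext re-evaluates the x and y expressions with t available, a linear interpolation between two points can be written directly into them.

ffmpeg -i VideoInput.mp4 -vf "drawtext=enable='between(t,12,14)':fontfile=myfont.otf:text='Test test':x='100+(500-100)*(t-12)/2':y='50+(300-50)*(t-12)/2':fontsize=65" -acodec copy outputVideo.mp4

At t=12 the expressions evaluate to (100, 50) and at t=14 to (500, 300); dividing by the 2-second window length makes the motion linear over the enabled interval.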



-
Display ffmpeg frames on OpenGL texture
24 October 2014, by naki — I am using Dranger's tutorial01 (ffmpeg) to decode the video and get the AVI frames. I want to use OpenGL to display the video: http://dranger.com/ffmpeg/tutorial01.html
The main function is as follows:
int main (int argc, char** argv) {
// opengl stuff
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(800, 600);
glutCreateWindow("Hello GL");
glutReshapeFunc(changeViewport);
glutDisplayFunc(render);
GLenum err = glewInit();
if(GLEW_OK !=err){
fprintf(stderr, "GLEW error");
return 1;
}
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
GLuint texture;
glGenTextures(1, &texture); //Make room for our texture
glBindTexture(GL_TEXTURE_2D, texture);
//ffmpeg stuff
AVFormatContext *pFormatCtx = NULL;
int i, videoStream;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer = NULL;
AVDictionary *optionsDict = NULL;
if(argc < 2) {
printf("Please provide a movie file\n");
return -1;
}
// Register all formats and codecs
av_register_all();
// Open video file
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
return -1; // Couldn't open file
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=av_frame_alloc();
// Allocate an AVFrame structure
pFrameRGB=av_frame_alloc();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height, pCodecCtx->pix_fmt, 800,
600, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
NULL, NULL);
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
// Read frames and save first five frames to disk
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
&packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
/* sws_scale
(
sws_ctx,
(uint8_t const * const *)pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
pFrameRGB->data,
pFrameRGB->linesize
);
*/
sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
// additional opengl
glBindTexture(GL_TEXTURE_2D, texture);
//gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
// glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
glTexImage2D(GL_TEXTURE_2D, //Always GL_TEXTURE_2D
0, //0 for now
GL_RGB, //Format OpenGL uses for image
pCodecCtx->width, pCodecCtx->height, //Width and height
0, //The border of the image
GL_RGB, //GL_RGB, because pixels are stored in RGB format
GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE, because pixels are stored
//as unsigned numbers
pFrameRGB->data[0]); //The actual pixel data
// additional opengl end
// Save the frame to disk
if(++i<=5)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
i);
}
}
glColor3f(1,1,1);
glBindTexture(GL_TEXTURE_2D, texture);
glBegin(GL_QUADS);
glTexCoord2f(0,1);
glVertex3f(0,0,0);
glTexCoord2f(1,1);
glVertex3f(pCodecCtx->width,0,0);
glTexCoord2f(1,0);
glVertex3f(pCodecCtx->width, pCodecCtx->height,0);
glTexCoord2f(0,0);
glVertex3f(0,pCodecCtx->height,0);
glEnd();
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;
}

Unfortunately I could not find my solution here: ffmpeg video to opengl texture.
The program compiles but does not show any video on the texture; just an OpenGL window is created.
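A hedged observation to close (added here, not part of the original question, and not a confirmed diagnosis of the code above): with the fixed-function pipeline, the default minification filter GL_NEAREST_MIPMAP_LINEAR expects mipmaps, so a texture given only a single glTexImage2D level is incomplete, and sampling an incomplete texture effectively disables texturing for the quad. Setting non-mipmap filters right after creating the texture avoids that:

glBindTexture(GL_TEXTURE_2D, texture);
// Without these, a texture with no mipmap chain is "incomplete" and
// the quad is drawn untextured.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

Note also that the code never reaches glutMainLoop() and never flushes or swaps buffers after drawing; with GLUT, drawing normally belongs in the registered glutDisplayFunc callback (render here), followed by glFlush() or glutSwapBuffers().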