Advanced search

Media (0)

No media matching your criteria is available on this site.

Other articles (48)

  • Permissions overridden by plugins

    27 April 2010, by

    Mediaspip core
    autoriser_auteur_modifier() so that visitors are able to modify their own information on the authors page

  • Publishing on MédiaSpip

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, if your installed MédiaSpip is at version 0.2 or higher. If necessary, contact the administrator of your MédiaSpip to find out.

  • Adding notes and captions to images

    7 February 2011

    To add notes and captions to images, the first step is to install the "Légendes" plugin.
    Once the plugin is activated, you can configure it in the configuration area in order to change the rights for creating, modifying, and deleting notes. By default, only site administrators can add notes to images.
    Changes when adding a media item
    When adding a media item of type "image", a new button appears above the preview (...)

On other sites (6076)

  • Display YUV420P video using OpenGLES on iPhone

    25 December 2014, by user3487978

    I am writing an app that displays YUV420p video on iPhone using the ffmpeg library. Everything works fine, but the OpenGL colors look a little strange; see the picture. The image on the bottom is displayed using an AVPicture in RGB format; the one above uses an AVPicture in YUV format.
    Any ideas?

    My shader:

    varying highp vec2 varTexcoord;
    uniform sampler2D SamplerY;
    uniform sampler2D SamplerUV;
    void main (void)
    {
       mediump vec3 yuv;
       lowp vec3 rgb;
       yuv.x = texture2D(SamplerY, varTexcoord).r;
       yuv.yz = texture2D(SamplerUV, varTexcoord).rg - vec2(0.5, 0.5);
       // Using BT.709 which is the standard for HDTV
       rgb = mat3(1.0,      1.0,      1.0,
                  0.0,     -0.18732,  1.8556,
                  1.57481, -0.46813,  0.0) * yuv;
       gl_FragColor = vec4(rgb,1.0);
    }

    Renderer:

    // Y plane: full resolution, one byte per texel
    glActiveTexture(GL_TEXTURE0);
    glTexImage2D(GL_TEXTURE_2D,
                0,
                GL_LUMINANCE,
                (GLsizei)model.width,
                (GLsizei)model.height,
                0,
                GL_LUMINANCE,
                GL_UNSIGNED_BYTE,
                [model.y bytes]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    // UV plane: interleaved U and V at half resolution
    glActiveTexture(GL_TEXTURE1);
    glTexImage2D(GL_TEXTURE_2D,
                0,
                GL_LUMINANCE,
                (GLsizei)model.width/2,
                (GLsizei)model.height/2,
                0,
                GL_LUMINANCE,
                GL_UNSIGNED_BYTE,
                [model.uv bytes]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    The image on the bottom uses RGB; the one above uses YUV.
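
    One possible culprit, offered purely as a hypothesis (the poster's data layout is not stated beyond the shader): the fragment shader samples the chroma texture with .rg, which implies an interleaved UV plane, yet that plane is uploaded as GL_LUMINANCE, which carries only a single channel per texel. On OpenGL ES 2.0, a two-byte-per-texel interleaved plane is normally uploaded as GL_LUMINANCE_ALPHA and sampled with .ra. A minimal sketch of that variant:

    // Hypothetical fix: upload the interleaved UV plane with two channels
    // per texel, so U lands in .r (luminance) and V in .a (alpha); the
    // shader would then read texture2D(SamplerUV, varTexcoord).ra.
    glActiveTexture(GL_TEXTURE1);
    glTexImage2D(GL_TEXTURE_2D,
                0,
                GL_LUMINANCE_ALPHA,
                (GLsizei)model.width/2,
                (GLsizei)model.height/2,
                0,
                GL_LUMINANCE_ALPHA,
                GL_UNSIGNED_BYTE,
                [model.uv bytes]);

    Two other common causes of slightly-off colors worth ruling out: row alignment (call glPixelStorei(GL_UNPACK_ALIGNMENT, 1) before glTexImage2D if the plane width is not a multiple of 4), and the coefficient set itself, since SD sources are usually encoded with BT.601 rather than the BT.709 matrix used in the shader above.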

  • Streaming client over TCP and RTSP through Wi-Fi or LAN in Android

    6 January 2015, by Gowtham

    I am struggling to develop a streaming client for DVR cameras. Using VLC Media Player over the RTSP protocol I got it working with standard Wi-Fi routers (Netgear, etc.), but the same code does not work with other Wi-Fi modems. I am now working with the FFMPEG framework to implement the streaming client on Android through the JNI API, but I have no clear idea of how to implement the JNI side.

    The network camera does work with the IP Cam Viewer app.

    My code is below:

    /*****************************************************/
    /* functional call */
    /*****************************************************/

    jboolean Java_FFmpeg_allocateBuffer( JNIEnv* env, jobject thiz )
    {

       // Allocate an AVFrame structure
       pFrameRGB=avcodec_alloc_frame();
       if(pFrameRGB==NULL)
           return 0;
       sprintf(debugMsg, "%d %d", screenWidth, screenHeight);
       INFO(debugMsg);
       // Determine required buffer size and allocate buffer
       numBytes=avpicture_get_size(dstFmt, screenWidth, screenHeight);
    /*
       numBytes=avpicture_get_size(dstFmt, pCodecCtx->width,
                     pCodecCtx->height);
    */
       buffer=(uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

       // Assign appropriate parts of buffer to image planes in pFrameRGB
       // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
       // of AVPicture
       avpicture_fill((AVPicture *)pFrameRGB, buffer, dstFmt, screenWidth, screenHeight);

       return 1;
    }


    /* for each decoded frame */
    jbyteArray Java_FFmpeg_getNextDecodedFrame( JNIEnv* env, jobject thiz )
    {
       // Free the packet kept from the previous call
       av_free_packet(&packet);

       while(av_read_frame(pFormatCtx, &packet)>=0) {

           if(packet.stream_index==videoStream) {

               avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

               if(frameFinished) {

                   // Note: allocating a new SwsContext on every frame leaks memory;
                   // sws_getCachedContext() would reuse a single context.
                   img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                    screenWidth, screenHeight, dstFmt,
                                                    SWS_BICUBIC, NULL, NULL, NULL);
                   /*
                   img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                    pCodecCtx->width, pCodecCtx->height, dstFmt,
                                                    SWS_BICUBIC, NULL, NULL, NULL);
                   */

                   sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                             0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                   ++frameCount;

                   /* uint8_t and jbyte are both 8 bits wide */
                   jbyteArray nativePixels = (*env)->NewByteArray(env, numBytes);
                   (*env)->SetByteArrayRegion(env, nativePixels, 0, numBytes, (const jbyte *)buffer);
                   return nativePixels;
               }
           }

           av_free_packet(&packet);
       }

       return NULL;
    }

    /*****************************************************/
    /* / functional call */
    /*****************************************************/


    jstring
    Java_FFmpeg_play( JNIEnv* env, jobject thiz, jstring jfilePath )
    {
       INFO("--- Play");
       char* filePath = (char *)(*env)->GetStringUTFChars(env, jfilePath, NULL);
       RE(filePath);

    /*****************************************************/

     AVFormatContext *pFormatCtx;
     int             i, videoStream;
     AVCodecContext  *pCodecCtx;
     AVCodec         *pCodec;
     AVFrame         *pFrame;
     AVPacket        packet;
     int             frameFinished;
     float           aspect_ratio;
     struct SwsContext *img_convert_ctx;

    INFO(filePath);

    /* FFmpeg */

     av_register_all();

     if(av_open_input_file(&pFormatCtx, filePath, NULL, 0, NULL)!=0)
       RE("failed av_open_input_file ");

     if(av_find_stream_info(pFormatCtx)<0)
           RE("failed av_find_stream_info");

     videoStream=-1;
     for(i=0; i<pFormatCtx->nb_streams; i++)
       if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
         videoStream=i;
         break;
       }
     if(videoStream==-1)
           RE("failed videostream == -1");

     pCodecCtx=pFormatCtx->streams[videoStream]->codec;

     pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
     if(pCodec==NULL) {
       RE("Unsupported codec!");
     }

     if(avcodec_open(pCodecCtx, pCodec)<0)
       RE("failed codec_open");

     pFrame=avcodec_alloc_frame();

    /* /FFmpeg */

    INFO("codec name:");
    INFO(pCodec->name);
    INFO("Getting into stream decode:");

    /* video stream */

     i=0;
     while(av_read_frame(pFormatCtx, &packet)>=0) {

       if(packet.stream_index==videoStream) {
         avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
         if(frameFinished) {
           ++i;
           INFO("frame finished");

           AVPicture pict;
           /*
           img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                                            PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

           sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                     0, pCodecCtx->height, pict.data, pict.linesize);
           */
         }
       }
       av_free_packet(&packet);
     }

    /* /video stream */

     av_free(pFrame);

     avcodec_close(pCodecCtx);

     av_close_input_file(pFormatCtx);

     RE("end of main");
    }

    I am not able to get frames from the network camera.

    Could someone give me an idea of how to implement a live streaming client for DVR cameras on Android?
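
    For reference, here is a minimal sketch of how the open path might look for a network camera rather than a local file, using the newer avformat_* API (the function names and the rtsp_transport option are stock FFmpeg; the URL is a placeholder). Forcing RTSP over TCP often helps on Wi-Fi links where UDP packets get dropped, which matches the router-dependent behavior described above:

    #include <libavformat/avformat.h>

    /* Sketch: open an RTSP source instead of a local file. */
    static AVFormatContext *open_rtsp(const char *url /* e.g. "rtsp://..." */)
    {
        AVFormatContext *fmt = NULL;
        AVDictionary *opts = NULL;

        av_register_all();
        avformat_network_init();      /* required before using network protocols */

        /* Interleave RTP over the RTSP TCP connection instead of UDP. */
        av_dict_set(&opts, "rtsp_transport", "tcp", 0);

        if (avformat_open_input(&fmt, url, NULL, &opts) != 0) {
            av_dict_free(&opts);
            return NULL;              /* could not connect */
        }
        av_dict_free(&opts);

        if (avformat_find_stream_info(fmt, NULL) < 0) {
            avformat_close_input(&fmt);
            return NULL;
        }
        return fmt;                   /* then proceed as with a local file */
    }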

  • Revision 1fc70e47cd : Adds code for corner detection and ransac

    7 February 2015, by Deb Mukherjee

    Changed Paths:
     Add /vp9/encoder/vp9_corner_detect.c
     Add /vp9/encoder/vp9_corner_detect.h
     Add /vp9/encoder/vp9_corner_match.c
     Add /vp9/encoder/vp9_corner_match.h
     Add /vp9/encoder/vp9_global_motion.c
     Add /vp9/encoder/vp9_global_motion.h
     Add /vp9/encoder/vp9_ransac.c
     Add /vp9/encoder/vp9_ransac.h
     Modify /vp9/vp9cx.mk

    Adds code for corner detection and ransac

    This code is to start experiments with global motion models.

    The corner detection can be either fast_9 or Harris.
    Corner matching is currently based on normalized correlation.
    Three flavors of ransac are used to estimate either a
    homography (8-param), an affine model (6-param), or a
    rotation-zoom-only affine model (4-param).

    The highest level API for the library is in vp9_global_motion.h,
    where there are two functions - one for computing a single model
    and another for computing multiple models up to a maximum number
    provided or until a desired inlier probability is achieved.

    Change-Id: I3f9788ec2dc0635cbc65f5c66c6ea8853cfcf2dd
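
    To make the three parameterizations concrete, here is an illustrative sketch of how each model maps a point; this is not the actual code in vp9_ransac.c, whose structs and function names may differ:

    /* Illustrative only: how each motion model projects a point (x, y). */

    /* Rotation-zoom affine (4-param): scale s and angle t folded into
     * a = s*cos(t), b = s*sin(t), plus a translation (m[2], m[3]). */
    static void rotzoom_project(const double m[4], double x, double y,
                                double *px, double *py)
    {
        *px = m[0] * x - m[1] * y + m[2];
        *py = m[1] * x + m[0] * y + m[3];
    }

    /* General affine (6-param): a full 2x3 matrix. */
    static void affine_project(const double m[6], double x, double y,
                               double *px, double *py)
    {
        *px = m[0] * x + m[1] * y + m[2];
        *py = m[3] * x + m[4] * y + m[5];
    }

    /* Homography (8-param): a 3x3 matrix with the bottom-right entry
     * fixed to 1, hence the perspective divide. */
    static void homography_project(const double m[8], double x, double y,
                                   double *px, double *py)
    {
        double w = m[6] * x + m[7] * y + 1.0;
        *px = (m[0] * x + m[1] * y + m[2]) / w;
        *py = (m[3] * x + m[4] * y + m[5]) / w;
    }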