
Media (1)
-
La conservation du net art au musée. Les stratégies à l’œuvre
26 May 2011
Updated: July 2013
Language: French
Type: Text
Other articles (52)
-
Improving the base version
13 September 2013
A nicer multiple select
The Chosen plugin improves the usability of multiple-selection fields. See the two images below for a comparison.
To do this, simply enable the Chosen plugin (Site general configuration > Plugin management), then configure it (Templates > Chosen) by enabling Chosen on the public site and specifying which form elements to enhance, for example select[multiple] for multiple-selection lists (...)
-
Emballe médias: what is it for?
4 February 2011
This plugin is intended to manage sites for publishing documents of all types.
It creates "media" items, namely: a "media" item is an article in the SPIP sense, created automatically when a document is uploaded, whether audio, video, image or text; only a single document can be linked to a so-called "media" article;
-
Custom menus
14 November 2010
MediaSPIP uses the Menus plugin to manage several configurable navigation menus.
This lets channel administrators configure these menus in detail.
Menus created when the site is initialized
By default, three menus are created automatically when the site is initialized: the main menu; identifier: barrenav; this menu is generally inserted at the top of the page after the header block, and its identifier makes it compatible with templates based on Zpip; (...)
On other sites (3723)
-
Bitmap to YUV, recorded video has only green pixels
19 January 2016, by UserAx
I am trying to convert a bitmap to YUV and record this YUV with the FFmpeg frame recorder...
I am getting video output with only green pixels, though when I check the properties of this video it shows the expected frame rate and resolution... The YUV encoding part is correct, but I think I am making a mistake somewhere else, most likely in returning the YUV bytes to the recording part (getByte(byte[] yuv)), because only there does yuv.length print as 0 in the console; all the other methods print a large value...
Kindly help...
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    directory.mkdirs();
    addListenerOnButton();
    play = (Button) findViewById(R.id.buttonplay);
    stop = (Button) findViewById(R.id.buttonstop);
    record = (Button) findViewById(R.id.buttonstart);
    stop.setEnabled(false);
    play.setEnabled(false);

    record.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            startRecording();
            getByte(new byte[]{});
        }
    });

    stop.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            stopRecording();
        }
    });

    play.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) throws IllegalArgumentException, SecurityException, IllegalStateException {
            Intent intent = new Intent(Intent.ACTION_VIEW, Uri.parse(String.valueOf(asmileys)));
            intent.setDataAndType(Uri.parse(String.valueOf(asmileys)), "video/mp4");
            startActivity(intent);
            Toast.makeText(getApplicationContext(), "Playing Video", Toast.LENGTH_LONG).show();
        }
    });
}
......//......
public void getByte(byte[] yuv) {
    getNV21(640, 480, bitmap);
    System.out.println(yuv.length + " ");
    if (audioRecord == null || audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        startTime = System.currentTimeMillis();
        return;
    }
    if (RECORD_LENGTH > 0) {
        int i = imagesIndex++ % images.length;
        yuvimage = images[i];
        timestamps[i] = 1000 * (System.currentTimeMillis() - startTime);
    }
    /* get video data */
    if (yuvimage != null && recording) {
        ((ByteBuffer) yuvimage.image[0].position(0)).put(yuv);
        if (RECORD_LENGTH <= 0) {
            try {
                long t = 1000 * (System.currentTimeMillis() - startTime);
                if (t > recorder.getTimestamp()) {
                    recorder.setTimestamp(t);
                }
                recorder.record(yuvimage);
            } catch (FFmpegFrameRecorder.Exception e) {
                e.printStackTrace();
            }
        }
    }
}
public byte[] getNV21(int inputWidth, int inputHeight, Bitmap bitmap) {
    int[] argb = new int[inputWidth * inputHeight];
    bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    bitmap.recycle();
    System.out.println(yuv.length + " ");
    return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uIndex = frameSize;
    int vIndex = frameSize;
    System.out.println(yuv420sp.length + " " + frameSize);
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            a = (argb[index] & 0xff000000) >> 24; // a is not used
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff) >> 0;
            // well-known RGB to YUV conversion
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a plane of Y and interleaved planes of VU, each subsampled by a factor of 2,
            // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
            // pixel AND every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
                yuv420sp[vIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
            }
            index++;
        }
    }
}
.....//.....
public void addListenerOnButton() {
    image = (ImageView) findViewById(R.id.imageView);
    image.setDrawingCacheEnabled(true);
    image.buildDrawingCache();
    bitmap = image.getDrawingCache();
    System.out.println(bitmap.getByteCount() + " ");
    button = (Button) findViewById(R.id.btn1);
    button.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View view) {
            image.setImageResource(R.drawable.image1);
        }
    });
......//......

EDIT 1:
I made a few changes to the above code:
record.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        startRecording();
        getByte();
    }
});
.....//....
public void getbyte() {
    byte[] yuv = getNV21(640, 480, bitmap);

So now, in the console, I get the same yuv length in this method as the yuv length from the getNV21 method.
But now I am getting a half black and half green screen (black above, green below) in the recorded video...
If I add these lines to the onCreate method:
image = (ImageView) findViewById(R.id.imageView);
image.setDrawingCacheEnabled(true);
image.buildDrawingCache();
bitmap = image.getDrawingCache();

I get distorted frames (frames are 1/4th of the displayed image, with colors mixed up here and there) in the video...
All I am trying to learn is the image processing and the flow of byte[] data from one method to another, but I am still a noob.
Kindly help!
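
For reference, here is a minimal sketch of the intended byte flow, assuming the bitmap, recorder, yuvimage and recording members used above; the method name recordNV21Frame is hypothetical. The point is only that the filled NV21 buffer returned by getNV21 (width * height * 3 / 2 bytes) is the array that reaches the recorder, rather than an empty byte[]:

// Sketch only (assumption, not the original code): size the NV21 buffer from the
// actual frame dimensions and pass that same buffer through to the recorder.
public void recordNV21Frame() {
    int width = 640;   // must match the recorder's frame size
    int height = 480;
    byte[] yuv = getNV21(width, height, bitmap);      // NV21 buffer is width * height * 3 / 2 bytes
    System.out.println("yuv.length = " + yuv.length); // expect 460800 for 640x480, never 0
    // note: getNV21 above recycles the bitmap, so the drawing cache must be rebuilt before the next frame
    if (yuvimage != null && recording) {
        ((ByteBuffer) yuvimage.image[0].position(0)).put(yuv); // copy the filled buffer, not an empty array
        try {
            recorder.record(yuvimage);
        } catch (FFmpegFrameRecorder.Exception e) {
            e.printStackTrace();
        }
    }
}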
-
ffmpeg "End mismatch 1" warning, jpeg2000 to avi
11 April 2023, by jklebes
Trying to convert a directory of JPEG 2000 grayscale images to a video with ffmpeg, I get warnings


[0;36m[jpeg2000 @ 0x55d8fa1b68c0] [0m[0;33mEnd mismatch 1

(and lots of "Last message repeated <n> times")


The command was


ffmpeg -y -r 10 -start_number 1 -i <path>/surface_30///img_000%01d.jp2 -vcodec msmpeg4 -vf scale=1920:-1 -q:v 8 <path>//surface_30///surface_30.avi


The output is


ffmpeg version 4.2.2 Copyright (c) 2000-2019 the FFmpeg developers
 built with gcc 7.3.0 (crosstool-NG 1.23.0.449-a04d0)
 configuration: --prefix=/home/jklebes001/miniconda3 --cc=/tmp/build/80754af9/ffmpeg_1587154242452/_build_env/bin/x86_64-conda_cos6-linux-gnu-cc --disable-doc --enable-avresample --enable-gmp --enable-hardcoded-tables --enable-libfreetype --enable-libvpx --enable-pthreads --enable-libopus --enable-postproc --enable-pic --enable-pthreads --enable-shared --enable-static --enable-version3 --enable-zlib --enable-libmp3lame --disable-nonfree --enable-gpl --enable-gnutls --disable-openssl --enable-libopenh264 --enable-libx264
 libavutil 56. 31.100 / 56. 31.100
 libavcodec 58. 54.100 / 58. 54.100
 libavformat 58. 29.100 / 58. 29.100
 libavdevice 58. 8.100 / 58. 8.100
 libavfilter 7. 57.100 / 7. 57.100
 libavresample 4. 0. 0 / 4. 0. 0
 libswscale 5. 5.100 / 5. 5.100
 libswresample 3. 5.100 / 3. 5.100
 libpostproc 55. 5.100 / 55. 5.100
[0;36m[jpeg2000 @ 0x55cb44144480] [0m[0;33mEnd mismatch 1
[0m Last message repeated 1 times
 Last message repeated 2 times
 Last message repeated 3 times

...


Last message repeated 73 times

Input #0, image2, from '<path>//surface_30///img_000%01d.jp2':
 Duration: 00:00:00.20, start: 0.000000, bitrate: N/A
 Stream #0:0: Video: jpeg2000, gray, 6737x4869, 25 tbr, 25 tbn, 25 tbc
Stream mapping:
 Stream #0:0 -> #0:0 (jpeg2000 (native) -> msmpeg4v3 (msmpeg4))
Press [q] to stop, [?] for help
[0;36m[jpeg2000 @ 0x55cb4418e200] [0m[0;33mEnd mismatch 1
[0m[0;36m[jpeg2000 @ 0x55cb441900c0] [0m[0;33mEnd mismatch 1


...


(about 600 lines of "end mismatch" and "last message repeated" cut)


...


[0m[0;36m[jpeg2000 @ 0x55cb4418e8c0] [0m[0;33mEnd mismatch 1
[0mOutput #0, avi, to '<path>/surface_30///surface_30.avi':
 Metadata:
 ISFT : Lavf58.29.100
 Stream #0:0: Video: msmpeg4v3 (msmpeg4) (MP43 / 0x3334504D), yuv420p, 1920x1388, q=2-31, 200 kb/s, 10 fps, 10 tbn, 10 tbc
 Metadata:
 encoder : Lavc58.54.100 msmpeg4
 Side data:
 cpb: bitrate max/min/avg: 0/0/200000 buffer size: 0 vbv_delay: -1
frame= 2 fps=0.8 q=8.0 size= 6kB time=00:00:00.20 bitrate= 227.1kbits/s speed=0.0844x
frame= 5 fps=1.7 q=8.0 size= 6kB time=00:00:00.50 bitrate= 90.8kbits/s speed=0.172x
frame= 5 fps=1.7 q=8.0 Lsize= 213kB time=00:00:00.50 bitrate=3494.7kbits/s speed=0.172x
video:208kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 2.732246%


What is the meaning of character sequences like [0;33m here?


I thought it might have something to do with bit depth and color format. Setting -pix_fmt gray had no effect, and indeed the format of the jp2 images is already detected as 8-bit gray.

The output .avi exists and seems fine.


The same command was previously used on JPEG files and works fine there. With JPEG, the output includes the lines


Input #0, image2, from '<path>/surface_30///img_000%01d.jpeg':
 Duration: 00:00:00.16, start: 0.000000, bitrate: N/A
 Stream #0:0: Video: mjpeg (Baseline), gray(bt470bg/unknown/unknown), 6737x4869 [SAR 1:1 DAR 6737:4869], 25 tbr, 25 tbn, 25 tbc
Stream mapping:
 Stream #0:0 -> #0:0 (mjpeg (native) -> msmpeg4v3 (msmpeg4))
Press [q] to stop, [?] for help
Output #0, avi, to '<path>/surface_30///surface_30.avi':
 Metadata:
 ISFT : Lavf58.29.100
 Stream #0:0: Video: msmpeg4v3 (msmpeg4) (MP43 / 0x3334504D), yuv420p, 6737x4869 [SAR 1:1 DAR 6737:4869], q=2-31, 200 kb/s, 10 fps, 10 tbn, 10 tbc
 Metadata:
 encoder : Lavc58.54.100 msmpeg4
 Side data:
 cpb: bitrate max/min/avg: 0/0/200000 buffer size: 0 vbv_delay: -1
frame= 2 fps=0.0 q=8.0 size= 6662kB time=00:00:00.20 bitrate=272859.9kbits/s speed=0.334x
frame= 3 fps=2.2 q=10.0 size= 10502kB time=00:00:00.30 bitrate=286764.2kbits/s speed=0.22x
frame= 4 fps=1.9 q=12.3 size= 13574kB time=00:00:00.40 bitrate=277987.7kbits/s speed=0.19x
frame= 4 fps=1.4 q=12.3 size= 13574kB time=00:00:00.40 bitrate=277987.7kbits/s speed=0.145x
frame= 4 fps=1.4 q=12.3 Lsize= 13657kB time=00:00:00.40 bitrate=279702.3kbits/s speed=0.145x
video:13652kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 0.041926%


detecting the mjpeg format and a similar, but more detailed, pixel format:
gray(bt470bg/unknown/unknown), 6737x4869 [SAR 1:1 DAR 6737:4869].


What is the difference when switching the input to jp2?


-
Decoding an MKA audio file into raw data (MKA Audio to raw audio data)
9 October 2020, by bbdd
My task is to open an existing audio file with the mka extension (Matroska container) and extract the raw audio data. The example I have only shows extracting raw data from an mp2 file; I do not know how to do this with the mka container. I would like to have something like this:

UPD


I found a way to save the audio data in the format in which it is stored in the audio file. An example is shown below.


PS. This is only a test version and most likely there are memory leaks and other problems.



#include <QFile>
#include <QDebug>
#include "audiodecoder.h"

int main(int argc, char* argv[])
{
 AudioDecoder decoder("/home/test/test.mka");
 bool started = decoder.start();
 if (!started) {
 return EXIT_FAILURE;
 }

 QFile file("/home/test/rawData.bin");
 file.open(QIODevice::WriteOnly);

 while (true) {
 auto data = decoder.getData(255);
 if (data.isEmpty()) {
 break;
 }
 file.write(data.data(), data.size());
 }
 file.close();
 return EXIT_SUCCESS;
}



audiodecoder.h


class AudioDecoder {
public:
 AudioDecoder(const QString& fileName);
 AudioDecoder& operator=(const AudioDecoder& rhs) = delete;
 AudioDecoder& operator=(AudioDecoder&& rhs) = delete;
 AudioDecoder(const AudioDecoder& rhs) = delete;
 AudioDecoder(AudioDecoder&& rhs) = delete;
 virtual ~AudioDecoder(void);

 virtual bool start(void) noexcept;
 virtual QByteArray getData(const quint16& size) noexcept;
 virtual bool stop(void) noexcept;

protected:
 bool m_initialized;
 QString m_fileName;

 AVFrame* p_frame = nullptr;
 AVPacket* p_packet = nullptr;
 AVCodecContext* p_cdcCtx = nullptr;
 AVFormatContext* p_frmCtx = nullptr;
};



audiodecoder.cpp



static void logging(const char* message)
{
 qDebug() << message;
}

AudioDecoder::AudioDecoder(const QString& fileName)
 : m_initialized(false)
 , m_fileName(fileName)
 , p_cdcCtx(nullptr)
 , p_frmCtx(nullptr)
{
 av_register_all();
}

QByteArray AudioDecoder::getData(const quint16& dataSize) noexcept
{
 QByteArray data;
 qint32 response = 0;
 if (av_read_frame(p_frmCtx, p_packet) >= 0) {
 //logging(QString("AVPacket->pts %1").arg(p_packet->pts).toStdString().c_str());
 //response = decode_packet(p_packet, p_cdcCtx, p_frame);
 response = avcodec_send_packet(p_cdcCtx, p_packet);
 if (response < 0) {
 logging("Error while sending a packet to the decoder");
 return {};
 }
 while (response >= 0) {
 response = avcodec_receive_frame(p_cdcCtx, p_frame);
 if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
 break;
 }
 else if (response < 0) {
 logging("Error while receiving a frame from the decoder");
 return {};
 }
 if (response >= 0) {
 logging(QString("Frame %1 (type=%2, size=%3 bytes) pts %4 key_frame %5 [DTS %6], duration[%7]")
 .arg(p_cdcCtx->frame_number)
 .arg(av_get_picture_type_char(p_frame->pict_type))
 .arg(p_frame->pkt_size)
 .arg(p_frame->pts)
 .arg(p_frame->key_frame)
 .arg(p_frame->coded_picture_number)
 .arg(p_frame->pkt_duration)
 .toStdString()
 .c_str());

 for (int i = 0; i < p_frame->linesize[0]; ++i) {
 data.push_back(p_frame->data[0][i]);
 }
 }
 }
 av_packet_unref(p_packet);
 return data;
 }
 return {};
}

bool AudioDecoder::start(void) noexcept
{
 if (m_initialized) {
 return true;
 }

 int error;
 // Open the input file to read from it.
 if ((error = avformat_open_input(&p_frmCtx,
 m_fileName.toStdString().c_str(), nullptr, nullptr))
 < 0) {
 qDebug() << "Could not open input file: " << m_fileName;
 p_frmCtx = nullptr;
 return false;
 }
 // Get information on the input file (number of streams etc.).
 if ((error = avformat_find_stream_info(p_frmCtx, nullptr)) < 0) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 // Make sure that there is only one stream in the input file.
 if ((p_frmCtx)->nb_streams != 1) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }

 if (p_frmCtx->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }

 // Find a decoder for the audio stream.
 AVCodec* input_codec = nullptr;
 if (!(input_codec = avcodec_find_decoder((p_frmCtx)->streams[0]->codecpar->codec_id))) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 // Allocate a new decoding context.
 AVCodecContext* avctx = avcodec_alloc_context3(input_codec);
 if (!avctx) {
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 // Initialize the stream parameters with demuxer information.
 error = avcodec_parameters_to_context(avctx, (p_frmCtx)->streams[0]->codecpar);
 if (error < 0) {
 avformat_close_input(&p_frmCtx);
 avcodec_free_context(&avctx);
 qDebug() << __LINE__;
 return false;
 }
 /* Open the decoder for the audio stream to use it later. */
 if ((error = avcodec_open2(avctx, input_codec, NULL)) < 0) {
 avcodec_free_context(&avctx);
 avformat_close_input(&p_frmCtx);
 qDebug() << __LINE__;
 return false;
 }
 /* Save the decoder context for easier access later. */
 p_cdcCtx = avctx;
 av_dump_format(p_frmCtx, 0, m_fileName.toStdString().c_str(), 0);

 p_frame = av_frame_alloc();
 if (!p_frame) {
 logging("failed to allocated memory for AVFrame");
 return false;
 }
 p_packet = av_packet_alloc();
 if (!p_packet) {
 logging("failed to allocated memory for AVPacket");
 return false;
 }
 return m_initialized = true;
}

bool AudioDecoder::stop(void) noexcept
{
 if (p_cdcCtx != nullptr) {
 avcodec_free_context(&p_cdcCtx);
 }
 if (p_frmCtx != nullptr) {
 avformat_close_input(&p_frmCtx);
 }
 return true;
}

AudioDecoder::~AudioDecoder(void)
{
 stop();
}



But the problem with this example is that I did not implement the ability to return exactly the requested amount of audio data; the requested size is simply ignored.
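
One possible way to honor the requested size would be to keep an internal buffer, decode packets until enough bytes are available, and slice exactly size bytes off per call. A minimal sketch under that assumption; the member m_buffer (a QByteArray) and the helper decodeNextPacket (essentially the av_read_frame / avcodec_receive_frame loop from getData above) are hypothetical, not part of the original class:

// Sketch only: return exactly `size` bytes from a hypothetical QByteArray m_buffer.
QByteArray AudioDecoder::getData(const quint16& size) noexcept
{
    // Decode more packets until the buffer holds enough data or the stream ends.
    while (m_buffer.size() < size) {
        QByteArray chunk = decodeNextPacket(); // hypothetical: one packet's worth of decoded samples
        if (chunk.isEmpty()) {
            break; // end of stream
        }
        m_buffer.append(chunk);
    }
    // Hand back at most `size` bytes and keep the remainder for the next call.
    QByteArray out = m_buffer.left(size);
    m_buffer.remove(0, out.size());
    return out;
}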