
Other articles (95)
-
Emballe médias: what is it for?
4 February 2011
This plugin is designed to manage sites that publish uploaded documents of all types.
It creates "media" items, namely: a "media" is an article in the SPIP sense, created automatically when a document is uploaded, whether audio, video, image or text; only one document can be linked to a given "media" article;
-
Managing creation and editing rights for objects
8 February 2011
By default, many features are restricted to administrators, but each remains independently configurable so that the minimum status required to use it can be changed, notably: writing content on the site, adjustable in the form-template management; adding notes to articles; adding captions and annotations to images;
-
Supporting all media types
13 April 2011
Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
On other sites (4425)
-
Recording a video using MediaRecorder
21 July 2016, by Cédric Portmann
I am currently using the TextureFromCameraActivity from Grafika to record a video in square (1:1) resolution. Therefore I changed the GLES20.glViewport call so that the video is shifted to the top and appears square. Now I would like to record this square view using MediaRecorder, or at least record the camera at normal resolution and then crop it using FFmpeg. However, I get the same error over and over again and I can't figure out why.
The error I get:
start called in an invalid state: 4
And yes I added all the necessary permissions.
android.permission.WRITE_EXTERNAL_STORAGE android.permission.CAMERA
android.permission.RECORD_VIDEO android.permission.RECORD_AUDIO
android.permission.STORAGE android.permission.READ_EXTERNAL_STORAGE
Here is the modified code:
https://github.com/google/grafika
Thanks for your help :D
package com.android.grafika;
import android.graphics.SurfaceTexture;
import android.hardware.Camera;
import android.media.CamcorderProfile;
import android.media.MediaRecorder;
import android.opengl.GLES20;
import android.opengl.Matrix;
import android.os.Bundle;
import android.os.Environment;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.util.Log;
import android.view.MotionEvent;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.View;
import android.widget.Button;
import android.widget.SeekBar;
import android.widget.TextView;
import android.app.Activity;
import android.widget.Toast;
import com.android.grafika.gles.Drawable2d;
import com.android.grafika.gles.EglCore;
import com.android.grafika.gles.GlUtil;
import com.android.grafika.gles.Sprite2d;
import com.android.grafika.gles.Texture2dProgram;
import com.android.grafika.gles.WindowSurface;
import java.io.File;
import java.io.IOException;
import java.lang.ref.WeakReference;
public class TextureFromCameraActivity extends Activity implements View.OnClickListener, SurfaceHolder.Callback,
SeekBar.OnSeekBarChangeListener {
private static final int DEFAULT_ZOOM_PERCENT = 0; // 0-100
private static final int DEFAULT_SIZE_PERCENT = 80; // 0-100
private static final int DEFAULT_ROTATE_PERCENT = 75; // 0-100
// Requested values; actual may differ.
private static final int REQ_CAMERA_WIDTH = 720;
private static final int REQ_CAMERA_HEIGHT = 720;
private static final int REQ_CAMERA_FPS = 30;
// The holder for our SurfaceView. The Surface can outlive the Activity (e.g. when
// the screen is turned off and back on with the power button).
//
// This becomes non-null after the surfaceCreated() callback is called, and gets set
// to null when surfaceDestroyed() is called.
private static SurfaceHolder sSurfaceHolder;
// Thread that handles rendering and controls the camera. Started in onResume(),
// stopped in onPause().
private RenderThread mRenderThread;
// Receives messages from renderer thread.
private MainHandler mHandler;
// User controls.
private SeekBar mZoomBar;
private SeekBar mSizeBar;
private SeekBar mRotateBar;
// These values are passed to us by the camera/render thread, and displayed in the UI.
// We could also just peek at the values in the RenderThread object, but we'd need to
// synchronize access carefully.
private int mCameraPreviewWidth, mCameraPreviewHeight;
private float mCameraPreviewFps;
private int mRectWidth, mRectHeight;
private int mZoomWidth, mZoomHeight;
private int mRotateDeg;
SurfaceHolder sh;
MediaRecorder recorder;
SurfaceHolder holder;
boolean recording = false;
public static final String TAG = "VIDEOCAPTURE";
private static final File OUTPUT_DIR = Environment.getExternalStorageDirectory();
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
recorder = new MediaRecorder();
setContentView(R.layout.activity_texture_from_camera);
mHandler = new MainHandler(this);
SurfaceView cameraView = (SurfaceView) findViewById(R.id.cameraOnTexture_surfaceView);
sh = cameraView.getHolder();
cameraView.setClickable(true);// make the surface view clickable
sh.addCallback(this);
//prepareRecorder();
mZoomBar = (SeekBar) findViewById(R.id.tfcZoom_seekbar);
mSizeBar = (SeekBar) findViewById(R.id.tfcSize_seekbar);
mRotateBar = (SeekBar) findViewById(R.id.tfcRotate_seekbar);
mZoomBar.setProgress(DEFAULT_ZOOM_PERCENT);
mSizeBar.setProgress(DEFAULT_SIZE_PERCENT);
mRotateBar.setProgress(DEFAULT_ROTATE_PERCENT);
mZoomBar.setOnSeekBarChangeListener(this);
mSizeBar.setOnSeekBarChangeListener(this);
mRotateBar.setOnSeekBarChangeListener(this);
Button record_btn = (Button)findViewById(R.id.button);
record_btn.setOnClickListener(this);
initRecorder();
updateControls();
}
@Override
protected void onResume() {
Log.d(TAG, "onResume BEGIN");
super.onResume();
mRenderThread = new RenderThread(mHandler);
mRenderThread.setName("TexFromCam Render");
mRenderThread.start();
mRenderThread.waitUntilReady();
RenderHandler rh = mRenderThread.getHandler();
rh.sendZoomValue(mZoomBar.getProgress());
rh.sendSizeValue(mSizeBar.getProgress());
rh.sendRotateValue(mRotateBar.getProgress());
if (sSurfaceHolder != null) {
Log.d(TAG, "Sending previous surface");
rh.sendSurfaceAvailable(sSurfaceHolder, false);
} else {
Log.d(TAG, "No previous surface");
}
Log.d(TAG, "onResume END");
}
@Override
protected void onPause() {
Log.d(TAG, "onPause BEGIN");
super.onPause();
RenderHandler rh = mRenderThread.getHandler();
rh.sendShutdown();
try {
mRenderThread.join();
} catch (InterruptedException ie) {
// not expected
throw new RuntimeException("join was interrupted", ie);
}
mRenderThread = null;
Log.d(TAG, "onPause END");
}
@Override // SurfaceHolder.Callback
public void surfaceCreated(SurfaceHolder holder) {
Log.d(TAG, "surfaceCreated holder=" + holder + " (static=" + sSurfaceHolder + ")");
if (sSurfaceHolder != null) {
throw new RuntimeException("sSurfaceHolder is already set");
}
sSurfaceHolder = holder;
if (mRenderThread != null) {
// Normal case -- render thread is running, tell it about the new surface.
RenderHandler rh = mRenderThread.getHandler();
rh.sendSurfaceAvailable(holder, true);
} else {
// Sometimes see this on 4.4.x N5: power off, power on, unlock, with device in
// landscape and a lock screen that requires portrait. The surface-created
// message is showing up after onPause().
//
// Chances are good that the surface will be destroyed before the activity is
// unpaused, but we track it anyway. If the activity is un-paused and we start
// the RenderThread, the SurfaceHolder will be passed in right after the thread
// is created.
Log.d(TAG, "render thread not running");
}
recorder.setPreviewDisplay(holder.getSurface());
}
@Override // SurfaceHolder.Callback
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
Log.d(TAG, "surfaceChanged fmt=" + format + " size=" + width + "x" + height +
" holder=" + holder);
if (mRenderThread != null) {
RenderHandler rh = mRenderThread.getHandler();
rh.sendSurfaceChanged(format, width, height);
} else {
Log.d(TAG, "Ignoring surfaceChanged");
return;
}
}
@Override // SurfaceHolder.Callback
public void surfaceDestroyed(SurfaceHolder holder) {
// In theory we should tell the RenderThread that the surface has been destroyed.
if (mRenderThread != null) {
RenderHandler rh = mRenderThread.getHandler();
rh.sendSurfaceDestroyed();
}
Log.d(TAG, "surfaceDestroyed holder=" + holder);
sSurfaceHolder = null;
}
@Override // SeekBar.OnSeekBarChangeListener
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
if (mRenderThread == null) {
// Could happen if we programmatically update the values after setting a listener
// but before starting the thread. Also, easy to cause this by scrubbing the seek
// bar with one finger then tapping "recents" with another.
Log.w(TAG, "Ignoring onProgressChanged received w/o RT running");
return;
}
RenderHandler rh = mRenderThread.getHandler();
// "progress" ranges from 0 to 100
if (seekBar == mZoomBar) {
//Log.v(TAG, "zoom: " + progress);
rh.sendZoomValue(progress);
} else if (seekBar == mSizeBar) {
//Log.v(TAG, "size: " + progress);
rh.sendSizeValue(progress);
} else if (seekBar == mRotateBar) {
//Log.v(TAG, "rotate: " + progress);
rh.sendRotateValue(progress);
} else {
throw new RuntimeException("unknown seek bar");
}
// If we're getting preview frames quickly enough we don't really need this, but
// we don't want to have chunky-looking resize movement if the camera is slow.
// OTOH, if we get the updates too quickly (60fps camera?), this could jam us
// up and cause us to run behind. So use with caution.
rh.sendRedraw();
}
@Override // SeekBar.OnSeekBarChangeListener
public void onStartTrackingTouch(SeekBar seekBar) {}
@Override // SeekBar.OnSeekBarChangeListener
public void onStopTrackingTouch(SeekBar seekBar) {}
/**
* Handles any touch events that aren't grabbed by one of the controls.
*/
@Override
public boolean onTouchEvent(MotionEvent e) {
float x = e.getX();
float y = e.getY();
switch (e.getAction()) {
case MotionEvent.ACTION_MOVE:
case MotionEvent.ACTION_DOWN:
//Log.v(TAG, "onTouchEvent act=" + e.getAction() + " x=" + x + " y=" + y);
if (mRenderThread != null) {
RenderHandler rh = mRenderThread.getHandler();
rh.sendPosition((int) x, (int) y);
// Forcing a redraw can cause sluggish-looking behavior if the touch
// events arrive quickly.
//rh.sendRedraw();
}
break;
default:
break;
}
return true;
}
/**
* Updates the current state of the controls.
*/
private void updateControls() {
String str = getString(R.string.tfcCameraParams, mCameraPreviewWidth,
mCameraPreviewHeight, mCameraPreviewFps);
TextView tv = (TextView) findViewById(R.id.tfcCameraParams_text);
tv.setText(str);
str = getString(R.string.tfcRectSize, mRectWidth, mRectHeight);
tv = (TextView) findViewById(R.id.tfcRectSize_text);
tv.setText(str);
str = getString(R.string.tfcZoomArea, mZoomWidth, mZoomHeight);
tv = (TextView) findViewById(R.id.tfcZoomArea_text);
tv.setText(str);
}
@Override
public void onClick(View view) {
if (recording) {
recorder.stop();
recording = false;
// Let's initRecorder so we can record again
initRecorder();
prepareRecorder();
} else {
recording = true;
recorder.start();
}
}
private void initRecorder() {
recorder.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
recorder.setVideoSource(MediaRecorder.VideoSource.DEFAULT);
CamcorderProfile cpHigh = CamcorderProfile
.get(CamcorderProfile.QUALITY_HIGH);
recorder.setProfile(cpHigh);
String path = Environment.getExternalStorageDirectory() + File.separator
+ Environment.DIRECTORY_DCIM + File.separator + "AlphaRun";
recorder.setOutputFile(path);
recorder.setMaxDuration(50000); // 50 seconds
recorder.setMaxFileSize(5000000); // Approximately 5 megabytes
}
private void prepareRecorder() {
try {
recorder.prepare();
} catch (IllegalStateException e) {
e.printStackTrace();
finish();
} catch (IOException e) {
e.printStackTrace();
finish();
}
}
/**
* Thread that handles all rendering and camera operations.
*/
private static class RenderThread extends Thread implements
SurfaceTexture.OnFrameAvailableListener {
// Object must be created on render thread to get correct Looper, but is used from
// UI thread, so we need to declare it volatile to ensure the UI thread sees a fully
// constructed object.
private volatile RenderHandler mHandler;
// Used to wait for the thread to start.
private Object mStartLock = new Object();
private boolean mReady = false;
private MainHandler mMainHandler;
private Camera mCamera;
private int mCameraPreviewWidth, mCameraPreviewHeight;
private EglCore mEglCore;
private WindowSurface mWindowSurface;
private int mWindowSurfaceWidth;
private int mWindowSurfaceHeight;
// Receives the output from the camera preview.
private SurfaceTexture mCameraTexture;
// Orthographic projection matrix.
private float[] mDisplayProjectionMatrix = new float[16];
private Texture2dProgram mTexProgram;
private final ScaledDrawable2d mRectDrawable =
new ScaledDrawable2d(Drawable2d.Prefab.RECTANGLE);
private final Sprite2d mRect = new Sprite2d(mRectDrawable);
private int mZoomPercent = DEFAULT_ZOOM_PERCENT;
private int mSizePercent = DEFAULT_SIZE_PERCENT;
private int mRotatePercent = DEFAULT_ROTATE_PERCENT;
private float mPosX, mPosY;
/**
* Constructor. Pass in the MainHandler, which allows us to send stuff back to the
* Activity.
*/
public RenderThread(MainHandler handler) {
mMainHandler = handler;
}
/**
* Thread entry point.
*/
@Override
public void run() {
Looper.prepare();
// We need to create the Handler before reporting ready.
mHandler = new RenderHandler(this);
synchronized (mStartLock) {
mReady = true;
mStartLock.notify(); // signal waitUntilReady()
}
// Prepare EGL and open the camera before we start handling messages.
mEglCore = new EglCore(null, 0);
openCamera(REQ_CAMERA_WIDTH, REQ_CAMERA_HEIGHT, REQ_CAMERA_FPS);
Looper.loop();
Log.d(TAG, "looper quit");
releaseCamera();
releaseGl();
mEglCore.release();
synchronized (mStartLock) {
mReady = false;
}
}
/**
* Waits until the render thread is ready to receive messages.
* <p>
* Call from the UI thread.
*/
public void waitUntilReady() {
synchronized (mStartLock) {
while (!mReady) {
try {
mStartLock.wait();
} catch (InterruptedException ie) { /* not expected */ }
}
}
}
/**
* Shuts everything down.
*/
private void shutdown() {
Log.d(TAG, "shutdown");
Looper.myLooper().quit();
}
/**
* Returns the render thread's Handler. This may be called from any thread.
*/
public RenderHandler getHandler() {
return mHandler;
}
/**
* Handles the surface-created callback from SurfaceView. Prepares GLES and the Surface.
*/
private void surfaceAvailable(SurfaceHolder holder, boolean newSurface) {
Surface surface = holder.getSurface();
mWindowSurface = new WindowSurface(mEglCore, surface, false);
mWindowSurface.makeCurrent();
// Create and configure the SurfaceTexture, which will receive frames from the
// camera. We set the textured rect's program to render from it.
mTexProgram = new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_EXT);
int textureId = mTexProgram.createTextureObject();
mCameraTexture = new SurfaceTexture(textureId);
mRect.setTexture(textureId);
if (!newSurface) {
// This Surface was established on a previous run, so no surfaceChanged()
// message is forthcoming. Finish the surface setup now.
//
// We could also just call this unconditionally, and perhaps do an unnecessary
// bit of reallocating if a surface-changed message arrives.
mWindowSurfaceWidth = mWindowSurface.getWidth();
mWindowSurfaceHeight = mWindowSurface.getWidth();  // stock Grafika uses getHeight(); width here appears to be part of the square-view change
finishSurfaceSetup();
}
mCameraTexture.setOnFrameAvailableListener(this);
}
/**
* Releases most of the GL resources we currently hold (anything allocated by
* surfaceAvailable()).
* <p>
* Does not release EglCore.
*/
private void releaseGl() {
GlUtil.checkGlError("releaseGl start");
if (mWindowSurface != null) {
mWindowSurface.release();
mWindowSurface = null;
}
if (mTexProgram != null) {
mTexProgram.release();
mTexProgram = null;
}
GlUtil.checkGlError("releaseGl done");
mEglCore.makeNothingCurrent();
}
/**
* Handles the surfaceChanged message.
* <p>
* We always receive surfaceChanged() after surfaceCreated(), but surfaceAvailable()
* could also be called with a Surface created on a previous run. So this may not
* be called.
*/
private void surfaceChanged(int width, int height) {
Log.d(TAG, "RenderThread surfaceChanged " + width + "x" + height);
mWindowSurfaceWidth = width;
mWindowSurfaceHeight = width;  // stock Grafika assigns height; width here matches the square-view change above
finishSurfaceSetup();
}
/**
* Handles the surfaceDestroyed message.
*/
private void surfaceDestroyed() {
// In practice this never appears to be called -- the activity is always paused
// before the surface is destroyed. In theory it could be called though.
Log.d(TAG, "RenderThread surfaceDestroyed");
releaseGl();
}
/**
* Sets up anything that depends on the window size.
* <p>
* Open the camera (to set mCameraAspectRatio) before calling here.
*/
private void finishSurfaceSetup() {
int width = mWindowSurfaceWidth;
int height = mWindowSurfaceHeight;
Log.d(TAG, "finishSurfaceSetup size=" + width + "x" + height +
" camera=" + mCameraPreviewWidth + "x" + mCameraPreviewHeight);
// Viewport shifted 700 px up from the bottom (the asker's change to move the
// square preview to the top; stock Grafika uses the full window here).
GLES20.glViewport(0, 700, width, height);
// Simple orthographic projection, with (0,0) in lower-left corner.
Matrix.orthoM(mDisplayProjectionMatrix, 0, 0, width, 0, height, -1, 1);
// Default position is center of screen.
mPosX = width / 2.0f;
mPosY = height / 2.0f;
updateGeometry();
// Ready to go, start the camera.
Log.d(TAG, "starting camera preview");
try {
mCamera.setPreviewTexture(mCameraTexture);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
mCamera.startPreview();
}
/**
* Updates the geometry of mRect, based on the size of the window and the current
* values set by the UI.
*/
private void updateGeometry() {
int width = mWindowSurfaceWidth;
int height = mWindowSurfaceHeight;
int smallDim = Math.min(width, height);
// Max scale is a bit larger than the screen, so we can show over-size.
float scaled = smallDim * (mSizePercent / 100.0f) * 1.25f;
float cameraAspect = (float) mCameraPreviewWidth / mCameraPreviewHeight;
int newWidth = Math.round(scaled * cameraAspect);
int newHeight = Math.round(scaled);
float zoomFactor = 1.0f - (mZoomPercent / 100.0f);
int rotAngle = Math.round(360 * (mRotatePercent / 100.0f));
mRect.setScale(newWidth, newHeight);
mRect.setPosition(mPosX, mPosY);
mRect.setRotation(rotAngle);
mRectDrawable.setScale(zoomFactor);
mMainHandler.sendRectSize(newWidth, newHeight);
mMainHandler.sendZoomArea(Math.round(mCameraPreviewWidth * zoomFactor),
Math.round(mCameraPreviewHeight * zoomFactor));
mMainHandler.sendRotateDeg(rotAngle);
}
@Override // SurfaceTexture.OnFrameAvailableListener; runs on arbitrary thread
public void onFrameAvailable(SurfaceTexture surfaceTexture) {
mHandler.sendFrameAvailable();
}
/**
* Handles incoming frame of data from the camera.
*/
private void frameAvailable() {
mCameraTexture.updateTexImage();
draw();
}
/**
* Draws the scene and submits the buffer.
*/
private void draw() {
GlUtil.checkGlError("draw start");
GLES20.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
mRect.draw(mTexProgram, mDisplayProjectionMatrix);
mWindowSurface.swapBuffers();
GlUtil.checkGlError("draw done");
}
/**
* Opens a camera, and attempts to establish preview mode at the specified width
* and height with a fixed frame rate.
* <p>
* Sets mCameraPreviewWidth / mCameraPreviewHeight.
*/
private void openCamera(int desiredWidth, int desiredHeight, int desiredFps) {
if (mCamera != null) {
throw new RuntimeException("camera already initialized");
}
Camera.CameraInfo info = new Camera.CameraInfo();
// Try to find a back-facing camera (the loop below checks CAMERA_FACING_BACK).
int numCameras = Camera.getNumberOfCameras();
for (int i = 0; i < numCameras; i++) {
Camera.getCameraInfo(i, info);
if (info.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
mCamera = Camera.open(i);
break;
}
}
if (mCamera == null) {
Log.d(TAG, "No front-facing camera found; opening default");
mCamera = Camera.open(); // opens first back-facing camera
}
if (mCamera == null) {
throw new RuntimeException("Unable to open camera");
}
Camera.Parameters parms = mCamera.getParameters();
CameraUtils.choosePreviewSize(parms, desiredWidth, desiredHeight);
parms.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE);
// Try to set the frame rate to a constant value.
int thousandFps = CameraUtils.chooseFixedPreviewFps(parms, desiredFps * 1000);
// Give the camera a hint that we're recording video. This can have a big
// impact on frame rate.
parms.setRecordingHint(true);
mCamera.setParameters(parms);
int[] fpsRange = new int[2];
Camera.Size mCameraPreviewSize = parms.getPreviewSize();
parms.getPreviewFpsRange(fpsRange);
String previewFacts = mCameraPreviewSize.width + "x" + mCameraPreviewSize.height;
if (fpsRange[0] == fpsRange[1]) {
previewFacts += " @" + (fpsRange[0] / 1000.0) + "fps";
} else {
previewFacts += " @[" + (fpsRange[0] / 1000.0) +
" - " + (fpsRange[1] / 1000.0) + "] fps";
}
Log.i(TAG, "Camera config: " + previewFacts);
mCameraPreviewWidth = mCameraPreviewSize.width;
mCameraPreviewHeight = mCameraPreviewSize.height;
mMainHandler.sendCameraParams(mCameraPreviewWidth, mCameraPreviewHeight,
thousandFps / 1000.0f);
}
/**
* Stops camera preview, and releases the camera to the system.
*/
private void releaseCamera() {
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
Log.d(TAG, "releaseCamera -- done");
}
}
}
}
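For reference, MediaRecorder enforces a strict state machine: start() is only legal after a successful prepare(). In the code above, prepareRecorder() is commented out in onCreate() and onClick() calls recorder.start() directly, which matches the "start called in an invalid state" message. A minimal sketch of the documented call order, assuming the same recorder field (recordOnce and outputPath are hypothetical names, and the output path must name a file, not a directory):

// Sketch of the documented MediaRecorder lifecycle, not a drop-in fix.
private void recordOnce(MediaRecorder recorder, String outputPath) throws IOException {
    recorder.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
    recorder.setVideoSource(MediaRecorder.VideoSource.DEFAULT);
    recorder.setProfile(CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH));
    recorder.setOutputFile(outputPath); // e.g. ".../DCIM/AlphaRun/run.mp4" (hypothetical)
    recorder.prepare();                 // required before start()
    recorder.start();
    // ... record ...
    recorder.stop();
    recorder.reset();                   // return to the idle state so the recorder can be reused
}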
-
ffmpeg: concatenate images into one image
26 July 2016, by drlexa
I use this to grab frames from a video and concatenate them into one image:
ffmpeg -i output.mp4 -vf 'fps=2,tile=1000x1' out.jpg
But there is a problem: I do not know the number of frames that will be fetched. Here I hardcoded the tile size 1000x1, but if there are more than 1000 frames there will be an error. Before starting ffmpeg I do not know the actual tile size.
So I want to use a command like:
ffmpeg -i output.mp4 -vf 'fps=2,tile=*x1' out.jpg
That means: I want to concatenate ALL fetched images into one row, but I cannot use * as an argument for tile.
Is there some way to solve my problem?
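One possible workaround, sketched here under the assumption that ffprobe from the same FFmpeg install is available: derive the frame count from the clip duration (at fps=2 it is roughly duration × 2, padded by one), then substitute it into tile. The tile filter fills any unused cells with black:

# Sketch: estimate how many frames fps=2 will emit, then size the tile row.
duration=$(ffprobe -v error -show_entries format=duration \
    -of default=noprint_wrappers=1:nokey=1 output.mp4)
frames=$(awk -v d="$duration" 'BEGIN { printf "%d", d * 2 + 1 }')
ffmpeg -i output.mp4 -vf "fps=2,tile=${frames}x1" out.jpg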
-
Encoding a screenshot into a video using FFMPEG
2 July 2013, by mohM
I'm trying to grab the pixels from the screen and encode the screenshot into a video using ffmpeg. I've seen a couple of examples, but they either assume you already have the pixel data or use image-file input. Whether I use sws_scale() or not (it is included in the examples I've seen), and whether I typecast an HBITMAP or a RGBQUAD*, the encoder reports that the image src data is bad and encodes a blank image rather than the screenshot. Is there something I'm missing here?
AVCodec* codec;
AVCodecContext* c = NULL;
AVFrame* inpic;
uint8_t* outbuf, *picture_buf;
int i, out_size, size, outbuf_size;
HBITMAP hBmp;
//int x,y;
avcodec_register_all();
printf("Video encoding\n");
// Find the H.264 video encoder
codec = avcodec_find_encoder(CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
else printf("H264 codec found\n");
c = avcodec_alloc_context3(codec);
inpic = avcodec_alloc_frame();
c->bit_rate = 400000;
c->width = screenWidth; // resolution must be a multiple of two
c->height = screenHeight;
c->time_base.num = 1;
c->time_base.den = 25;
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames=1;
c->pix_fmt = PIX_FMT_YUV420P;
c->codec_id = CODEC_ID_H264;
//c->codec_type = AVMEDIA_TYPE_VIDEO;
//av_opt_set(c->priv_data, "preset", "slow", 0);
//printf("Setting presets to slow for performance\n");
// Open the encoder
if (avcodec_open2(c, codec,NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
else printf("H264 codec opened\n");
outbuf_size = 100000 + 12*c->width*c->height; // alloc image and output buffer
//outbuf_size = 100000;
outbuf = static_cast<uint8_t*>(malloc(outbuf_size));
size = c->width * c->height;
picture_buf = static_cast<uint8_t*>(malloc((size*3)/2));
printf("Setting buffer size to: %d\n",outbuf_size);
FILE* f = fopen("example.mpg","wb");
if(!f) printf("x - Cannot open video file for writing\n");
else printf("Opened video file for writing\n");
/*inpic->data[0] = picture_buf;
inpic->data[1] = inpic->data[0] + size;
inpic->data[2] = inpic->data[1] + size / 4;
inpic->linesize[0] = c->width;
inpic->linesize[1] = c->width / 2;
inpic->linesize[2] = c->width / 2;*/
//int x,y;
// encode 1 second of video
for(i=0;i<c->time_base.den;i++) {
fflush(stdout);
HWND hDesktopWnd = GetDesktopWindow();
HDC hDesktopDC = GetDC(hDesktopWnd);
HDC hCaptureDC = CreateCompatibleDC(hDesktopDC);
hBmp = CreateCompatibleBitmap(GetDC(0), screenWidth, screenHeight);
SelectObject(hCaptureDC, hBmp);
BitBlt(hCaptureDC, 0, 0, screenWidth, screenHeight, hDesktopDC, 0, 0, SRCCOPY|CAPTUREBLT);
BITMAPINFO bmi = {0};
bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader);
bmi.bmiHeader.biWidth = screenWidth;
bmi.bmiHeader.biHeight = screenHeight;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
RGBQUAD *pPixels = new RGBQUAD[screenWidth*screenHeight];
GetDIBits(hCaptureDC,hBmp,0,screenHeight,pPixels,&bmi,DIB_RGB_COLORS);
inpic->pts = (float) i * (1000.0/(float)(c->time_base.den))*90;
avpicture_fill((AVPicture*)inpic, (uint8_t*)pPixels, PIX_FMT_BGR32, c->width, c->height); // Fill picture with image
av_image_alloc(inpic->data, inpic->linesize, c->width, c->height, c->pix_fmt, 1);
//printf("Allocated frame\n");
//SaveBMPFile(L"screenshot.bmp",hBmp,hDc,screenWidth,screenHeight);
ReleaseDC(hDesktopWnd,hDesktopDC);
DeleteDC(hCaptureDC);
DeleteObject(hBmp);
// encode the image
out_size = avcodec_encode_video(c, outbuf, outbuf_size, inpic);
printf("Encoding frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
// get the delayed frames
for(; out_size; i++) {
fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
printf("Writing frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
// add sequence end code to have a real mpeg file
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(picture_buf);
free(outbuf);
avcodec_close(c);
av_free(c);
av_free(inpic);
printf("Closed codec and Freed\n");