
Media (1)
-
Rennes Emotion Map 2010-11
19 October 2011, by
Updated: July 2013
Language: French
Type: Text
Other articles (106)
-
Creating farms of unique websites
13 April 2011, by
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects / individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...) -
Permissions overridden by plugins
27 April 2010, by
Mediaspip core
autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page -
Supporting all media types
13 April 2011, by
Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
On other sites (7956)
-
Python opencv ffmpeg threading exit functions
29 November 2020, by scacchi
I'm trying to end the audio/video recording loop by pressing a key (or on some other event). If I use a simple time.sleep() in the main loop it works perfectly: after 5 seconds it stops the video and creates the file. If I use the keyboard_pressed() function instead, stop_AVrecording() and file_manager() are not executed correctly.
What am I doing wrong?
Thanks in advance


from __future__ import print_function, division
import numpy as np
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
import keyboard

class VideoRecorder:
    "Video class based on openCV"

    def __init__(self, name="temp_video.avi", fourcc="MJPG", sizex=640, sizey=480, fps=30):
        self.open = True
        self.fps = fps                    # fps should be the minimum constant rate at which the camera can
        self.fourcc = fourcc              # capture images (with no decrease in speed over time; testing is required)
        self.frameSize = (sizex, sizey)   # video formats and sizes also depend and vary according to the camera used
        self.video_filename = name
        self.video_cap = cv2.VideoCapture(1, cv2.CAP_DSHOW)
        self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
        self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
        self.frame_counts = 1
        self.start_time = time.time()

    def record(self):
        "Video starts being recorded"
        counter = 1
        timer_start = time.time()
        timer_current = 0
        while self.open:
            ret, video_frame = self.video_cap.read()
            if ret:
                self.video_out.write(video_frame)
                # print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
                self.frame_counts += 1
                counter += 1
                timer_current = time.time() - timer_start
                # time.sleep(1/self.fps)
                # gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
                cv2.imshow('video_frame', video_frame)
                cv2.waitKey(1)
            else:
                break

    def stop(self):
        "Finishes the video recording therefore the thread too"
        if self.open:
            self.open = False
            self.video_out.release()
            self.video_cap.release()
            cv2.destroyAllWindows()

    def start(self):
        "Launches the video recording function using a thread"
        video_thread = threading.Thread(target=self.record)
        video_thread.start()

class AudioRecorder():
    "Audio class based on pyAudio and Wave"

    def __init__(self, filename="temp_audio.wav", rate=44100, fpb=1024, channels=2):
        self.open = True
        self.rate = rate
        self.frames_per_buffer = fpb
        self.channels = channels
        self.format = pyaudio.paInt16
        self.audio_filename = filename
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.frames_per_buffer)
        self.audio_frames = []

    def record(self):
        "Audio starts being recorded"
        self.stream.start_stream()
        while self.open:
            data = self.stream.read(self.frames_per_buffer)
            self.audio_frames.append(data)
            if not self.open:
                break

    def stop(self):
        "Finishes the audio recording therefore the thread too"
        if self.open:
            self.open = False
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
            waveFile = wave.open(self.audio_filename, 'wb')
            waveFile.setnchannels(self.channels)
            waveFile.setsampwidth(self.audio.get_sample_size(self.format))
            waveFile.setframerate(self.rate)
            waveFile.writeframes(b''.join(self.audio_frames))
            waveFile.close()

    def start(self):
        "Launches the audio recording function using a thread"
        audio_thread = threading.Thread(target=self.record)
        audio_thread.start()

def start_AVrecording(filename="test"):
    global video_thread
    global audio_thread
    video_thread = VideoRecorder()
    audio_thread = AudioRecorder()
    audio_thread.start()
    video_thread.start()
    return filename


def start_video_recording(filename="test"):
    global video_thread
    video_thread = VideoRecorder()
    video_thread.start()
    return filename

def start_audio_recording(filename="test"):
    global audio_thread
    audio_thread = AudioRecorder()
    audio_thread.start()
    return filename

def stop_AVrecording(filename="test"):
    audio_thread.stop()
    frame_counts = video_thread.frame_counts
    elapsed_time = time.time() - video_thread.start_time
    recorded_fps = frame_counts / elapsed_time
    print("total frames " + str(frame_counts))
    print("elapsed time " + str(elapsed_time))
    print("recorded fps " + str(recorded_fps))
    video_thread.stop()

    # Makes sure the threads have finished
    while threading.active_count() > 1:
        time.sleep(1)

    # Merging audio and video signal
    if abs(recorded_fps - 6) >= 0.01:    # If the fps rate was higher/lower than expected, re-encode it to the expected
        print("Re-encoding")
        cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
        subprocess.call(cmd, shell=True)
        print("Muxing")
        cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
    else:
        print("Normal recording\nMuxing")
        cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
        subprocess.call(cmd, shell=True)
        print("..")

def file_manager(filename="test"):
    "Required and wanted processing of final files"
    local_path = os.getcwd()
    if os.path.exists(str(local_path) + "/temp_audio.wav"):
        os.remove(str(local_path) + "/temp_audio.wav")
    if os.path.exists(str(local_path) + "/temp_video.avi"):
        os.remove(str(local_path) + "/temp_video.avi")
    if os.path.exists(str(local_path) + "/temp_video2.avi"):
        os.remove(str(local_path) + "/temp_video2.avi")
    # if os.path.exists(str(local_path) + "/" + filename + ".avi"):
    #     os.remove(str(local_path) + "/" + filename + ".avi")

def keyboard_pressed():
    while True:
        if keyboard.is_pressed('q'):    # if key 'q' is pressed
            print('------------You Pressed Q Key!--------------')
            # time.sleep(5)
            break


if __name__ == '__main__':
    start_AVrecording()
    # time.sleep(5)
    keyboard_pressed()
    print('-------------Time Sleep-------------------')
    time.sleep(5)
    print('-------------Stop AVrecording-------------')
    stop_AVrecording()
    print('-------------File Manager-----------------')
    file_manager()
    print('-----------------End----------------------')
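
For comparison, a minimal sketch of an alternative main block that blocks on the key press instead of busy-polling (it assumes the same keyboard package imported above; keyboard.wait() simply returns once the given key has been pressed):

if __name__ == '__main__':
    start_AVrecording()
    keyboard.wait('q')    # block here until 'q' is pressed, no polling loop
    print('-------------Stop AVrecording-------------')
    stop_AVrecording()
    print('-------------File Manager-----------------')
    file_manager()
    print('-----------------End----------------------')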





-
Encoding of raw frames (D3D11Texture2D) to an rtsp stream using libav*
16 July 2021, by uzer
I have managed to create an RTSP stream using libav* and a DirectX texture (which I am obtaining from the GDI API using the BitBlt method). Here's my approach for creating a live RTSP stream:


-
Create output context and stream (skipping the checks here)

avformat_alloc_output_context2(&ofmt_ctx, NULL, "rtsp", rtsp_url); // RTSP
vid_codec = avcodec_find_encoder(ofmt_ctx->oformat->video_codec);
vid_stream = avformat_new_stream(ofmt_ctx, vid_codec);
vid_codec_ctx = avcodec_alloc_context3(vid_codec);

-
Set codec params


codec_ctx->codec_tag = 0;
codec_ctx->codec_id = ofmt_ctx->oformat->video_codec;
//codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
codec_ctx->width = width; codec_ctx->height = height;
codec_ctx->gop_size = 12;
//codec_ctx->gop_size = 40;
//codec_ctx->max_b_frames = 3;
codec_ctx->pix_fmt = target_pix_fmt;   // AV_PIX_FMT_YUV420P
codec_ctx->framerate = { stream_fps, 1 };
codec_ctx->time_base = { 1, stream_fps };
if (fctx->oformat->flags & AVFMT_GLOBALHEADER)
{
    codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}



-
Initialize video stream


if (avcodec_parameters_from_context(stream->codecpar, codec_ctx) < 0)
{
    Debug::Error("Could not initialize stream codec parameters!");
    return false;
}

AVDictionary* codec_options = nullptr;
if (codec->id == AV_CODEC_ID_H264) {
    av_dict_set(&codec_options, "profile", "high", 0);
    av_dict_set(&codec_options, "preset", "fast", 0);
    av_dict_set(&codec_options, "tune", "zerolatency", 0);
}

// open video encoder
int ret = avcodec_open2(codec_ctx, codec, &codec_options);
if (ret < 0) {
    Debug::Error("Could not open video encoder: ", avcodec_get_name(codec->id), " error ret: ", AVERROR(ret));
    return false;
}

stream->codecpar->extradata = codec_ctx->extradata;
stream->codecpar->extradata_size = codec_ctx->extradata_size;



-
Start streaming


// Create new frame and allocate buffer
AVFrame* AllocateFrameBuffer(AVCodecContext* codec_ctx, double width, double height)
{
    AVFrame* frame = av_frame_alloc();
    std::vector<uint8_t> framebuf(av_image_get_buffer_size(codec_ctx->pix_fmt, width, height, 1));
    av_image_fill_arrays(frame->data, frame->linesize, framebuf.data(), codec_ctx->pix_fmt, width, height, 1);
    frame->width = width;
    frame->height = height;
    frame->format = static_cast<int>(codec_ctx->pix_fmt);
    //Debug::Log("framebuf size: ", framebuf.size(), " frame format: ", frame->format);
    return frame;
}

void RtspStream(AVFormatContext* ofmt_ctx, AVStream* vid_stream, AVCodecContext* vid_codec_ctx, char* rtsp_url)
{
 printf("Output stream info:\n");
 av_dump_format(ofmt_ctx, 0, rtsp_url, 1);

 const int width = WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetTextureWidth();
 const int height = WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetTextureHeight();

 //DirectX BGRA to h264 YUV420p
 SwsContext* conversion_ctx = sws_getContext(width, height, src_pix_fmt,
 vid_stream->codecpar->width, vid_stream->codecpar->height, target_pix_fmt, 
 SWS_BICUBIC | SWS_BITEXACT, nullptr, nullptr, nullptr);
if (!conversion_ctx)
{
 Debug::Error("Could not initialize sample scaler!");
 return;
}

 AVFrame* frame = AllocateFrameBuffer(vid_codec_ctx,vid_codec_ctx->width,vid_codec_ctx->height);
 if (!frame) {
 Debug::Error("Could not allocate video frame\n");
 return;
 }


 if (avformat_write_header(ofmt_ctx, NULL) < 0) {
 Debug::Error("Error occurred when writing header");
 return;
 }
 if (av_frame_get_buffer(frame, 0) < 0) {
 Debug::Error("Could not allocate the video frame data\n");
 return;
 }

 int frame_cnt = 0;
 //av start time in microseconds
 int64_t start_time_av = av_gettime();
 AVRational time_base = vid_stream->time_base;
 AVRational time_base_q = { 1, AV_TIME_BASE };

 // frame pixel data info
 int data_size = width * height * 4;
 uint8_t* data = new uint8_t[data_size];
// AVPacket* pkt = av_packet_alloc();

 while (RtspStreaming::IsStreaming())
 {
 /* make sure the frame data is writable */
 if (av_frame_make_writable(frame) < 0)
 {
 Debug::Error("Can't make frame writable");
 break;
 }

 //get copy/ref of the texture
 //uint8_t* data = WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetBuffer();
 if (!WindowManager::Get().GetWindow(RtspStreaming::WindowId())->GetPixels(data, 0, 0, width, height))
 {
 Debug::Error("Failed to get frame buffer. ID: ", RtspStreaming::WindowId());
 std::this_thread::sleep_for (std::chrono::seconds(2));
 continue;
 }
 //printf("got pixels data\n");
 // convert BGRA to yuv420 pixel format
 int srcStrides[1] = { 4 * width };
 if (sws_scale(conversion_ctx, &data, srcStrides, 0, height, frame->data, frame->linesize) < 0)
 {
 Debug::Error("Unable to scale d3d11 texture to frame. ", frame_cnt);
 break;
 }
 //Debug::Log("frame pts: ", frame->pts, " time_base:", av_rescale_q(1, vid_codec_ctx->time_base, vid_stream->time_base));
 frame->pts = frame_cnt++; 
 //frame_cnt++;
 //printf("scale conversion done\n");

 //encode to the video stream
 int ret = avcodec_send_frame(vid_codec_ctx, frame);
 if (ret < 0)
 {
 Debug::Error("Error sending frame to codec context! ",frame_cnt);
 break;
 }

 AVPacket* pkt = av_packet_alloc();
 //av_init_packet(pkt);
 ret = avcodec_receive_packet(vid_codec_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 {
 //av_packet_unref(pkt);
 av_packet_free(&pkt);
 continue;
 }
 else if (ret < 0)
 {
 Debug::Error("Error during receiving packet: ",AVERROR(ret));
 //av_packet_unref(pkt);
 av_packet_free(&pkt);
 break;
 }

 if (pkt->pts == AV_NOPTS_VALUE)
 {
 //Write PTS
 //Duration between 2 frames (us)
 int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(vid_stream->r_frame_rate);
 //Parameters
 pkt->pts = (double)(frame_cnt * calc_duration) / (double)(av_q2d(time_base) * AV_TIME_BASE);
 pkt->dts = pkt->pts;
 pkt->duration = (double)calc_duration / (double)(av_q2d(time_base) * AV_TIME_BASE);
 }
 int64_t pts_time = av_rescale_q(pkt->dts, time_base, time_base_q);
 int64_t now_time = av_gettime() - start_time_av;

 if (pts_time > now_time)
 av_usleep(pts_time - now_time);

 //pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 //pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 //pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
 //pkt->pos = -1;

 //write frame and send
 if (av_interleaved_write_frame(ofmt_ctx, pkt)<0)
 {
 Debug::Error("Error muxing packet, frame number:",frame_cnt);
 break;
 }

 //Debug::Log("RTSP streaming...");
 //std::this_thread::sleep_for(std::chrono::milliseconds(1000/20));
 //av_packet_unref(pkt);
 av_packet_free(&pkt);
 }

 //av_free_packet(pkt);
 delete[] data;

 /* Write the trailer, if any. The trailer must be written before you
 * close the CodecContexts open when you wrote the header; otherwise
 * av_write_trailer() may try to use memory that was freed on
 * av_codec_close(). */
 av_write_trailer(ofmt_ctx);
 av_frame_unref(frame);
 av_frame_free(&frame);
 printf("streaming thread CLOSED!\n");
}












Now, this allows me to connect to my RTSP server and maintain the connection. However, on the RTSP client side I am getting either a gray frame or a single static frame, as shown below:




I would appreciate it if you could help with the following questions:



- Firstly, why is the stream not working in spite of the continued connection to the server and the frames being updated?
- Video codec: by default the RTSP format uses the MPEG-4 codec; is it possible to use H.264? When I manually set it to AV_CODEC_ID_H264, the program fails at avcodec_open2 with a return value of -22.
- Do I need to create and allocate a new "AVFrame" and "AVPacket" for every frame, or can I just reuse a global variable for this?
- Do I need to explicitly add some code for real-time streaming? (Like the "-re" flag we use in ffmpeg.)










It would be great if you could point out some example code for creating a live stream. I have checked the following resources:



- https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/encode_video.c
- streaming FLV to RTMP with FFMpeg using H264 codec and C++ API to flv.js
- https://medium.com/swlh/streaming-video-with-ffmpeg-and-directx-11-7395fcb372c4








Update


While testing I found that I am able to play the stream using ffplay, while it gets stuck in the VLC player. Here is a snapshot of the ffplay log:




-
Using FFmpeg with URL input causes SIGSEGV in AWS Lambda (Python runtime)
26 March, by Dave94
I'm trying to implement a video converting solution on AWS Lambda, following their article named Processing user-generated content using AWS Lambda and FFmpeg.
However, when I run my command with subprocess.Popen() it returns -11, which translates to SIGSEGV (segmentation fault).
I've tried to process the video with the newest (4.3.1) static build from John Van Sickle's site as well as with the "official" ffmpeg-lambda-layer, but it seems like it doesn't matter which one I use, the result is the same.

If I download the video to the Lambda's /tmp directory and add this downloaded file as an input to FFmpeg, it works correctly (with the same parameters). However, I'm trying to avoid this, as the /tmp directory's maximum size is only 512 MB, which is not quite enough for me.
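
For context, the working variant described here fetches the object into /tmp first; a minimal sketch of that step (assuming boto3, with a placeholder bucket name and the same s3_source_key used in the code further below):

import boto3

s3 = boto3.client('s3')
# Download the source object into the function's ephemeral storage (/tmp, 512 MB cap)
s3.download_file('bucket', s3_source_key, '/tmp/' + s3_source_key)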

The relevant code, which returns SIGSEGV:


ffmpeg_cmd = '/opt/bin/ffmpeg -stream_loop -1 -i "' + s3_source_signed_url + '" -i /opt/bin/audio.mp3 -i /opt/bin/watermark.png -shortest -y -deinterlace -vcodec libx264 -pix_fmt yuv420p -preset veryfast -r 30 -g 60 -b:v 4500k -c:a copy -map 0:v:0 -map 1:a:0 -filter_complex scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,overlay=(W-w)/2:(H-h)/2,format=yuv420p -loglevel verbose -f flv -'
command1 = shlex.split(ffmpeg_cmd)
p1 = subprocess.Popen(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p1.communicate()
print(p1.returncode) #prints -11
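
As a side note, the signal behind that return code can be checked directly: Popen reports a child killed by signal N as returncode -N, so a small sketch on top of the p1 object above is enough to confirm the segmentation fault:

import signal

if p1.returncode < 0:
    # -11 means the child process was killed by signal 11
    print(signal.Signals(-p1.returncode).name)   # prints "SIGSEGV"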



stderr of FFmpeg:


ffmpeg version 4.1.3-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2019 the FFmpeg developers
 built with gcc 6.3.0 (Debian 6.3.0-18+deb9u1) 20170516
 configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc-6 --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzvbi --enable-libzimg
 libavutil 56. 22.100 / 56. 22.100
 libavcodec 58. 35.100 / 58. 35.100
 libavformat 58. 20.100 / 58. 20.100
 libavdevice 58. 5.100 / 58. 5.100
 libavfilter 7. 40.101 / 7. 40.101
 libswscale 5. 3.100 / 5. 3.100
 libswresample 3. 3.100 / 3. 3.100
 libpostproc 55. 3.100 / 55. 3.100
[tcp @ 0x728cc00] Starting connection attempt to 52.219.74.177 port 443
[tcp @ 0x728cc00] Successfully connected to 52.219.74.177 port 443
[h264 @ 0x729b780] Reinit context to 1280x720, pix_fmt: yuv420p
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'https://bucket.s3.amazonaws.com --> presigned url with 15 min expiration time':
 Metadata:
 major_brand : mp42
 minor_version : 0
 compatible_brands: mp42mp41isomavc1
 creation_time : 2015-09-02T07:42:42.000000Z
 Duration: 00:00:15.64, start: 0.000000, bitrate: 2640 kb/s
 Stream #0:0(und): Video: h264 (High), 1 reference frame (avc1 / 0x31637661), yuv420p(tv, bt709, left), 1280x720 [SAR 1:1 DAR 16:9], 2475 kb/s, 25 fps, 25 tbr, 25 tbn, 50 tbc (default)
 Metadata:
 creation_time : 2015-09-02T07:42:42.000000Z
 handler_name : L-SMASH Video Handler
 encoder : AVC Coding
 Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, stereo, fltp, 160 kb/s (default)
 Metadata:
 creation_time : 2015-09-02T07:42:42.000000Z
 handler_name : L-SMASH Audio Handler
[mp3 @ 0x733f340] Skipping 0 bytes of junk at 1344.
Input #1, mp3, from '/opt/bin/audio.mp3':
 Metadata:
 encoded_by : Logic Pro X
 date : 2021-01-03
 coding_history : 
 time_reference : 158760000
 umid : 0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004500F9E4
 encoder : Lavf58.49.100
 Duration: 00:04:01.21, start: 0.025057, bitrate: 320 kb/s
 Stream #1:0: Audio: mp3, 44100 Hz, stereo, fltp, 320 kb/s
 Metadata:
 encoder : Lavc58.97
Input #2, png_pipe, from '/opt/bin/watermark.png':
 Duration: N/A, bitrate: N/A
 Stream #2:0: Video: png, 1 reference frame, rgba(pc), 701x190 [SAR 1521:1521 DAR 701:190], 25 tbr, 25 tbn, 25 tbc
[Parsed_scale_0 @ 0x7341140] w:1920 h:1080 flags:'bilinear' interl:0
Stream mapping:
 Stream #0:0 (h264) -> scale
 Stream #2:0 (png) -> overlay:overlay
 format -> Stream #0:0 (libx264)
 Stream #1:0 -> #0:1 (copy)
Press [q] to stop, [?] for help
[h264 @ 0x72d8600] Reinit context to 1280x720, pix_fmt: yuv420p
[Parsed_scale_0 @ 0x733c1c0] w:1920 h:1080 flags:'bilinear' interl:0
[graph 0 input from stream 0:0 @ 0x7669200] w:1280 h:720 pixfmt:yuv420p tb:1/25 fr:25/1 sar:1/1 sws_param:flags=2
[graph 0 input from stream 2:0 @ 0x766a980] w:701 h:190 pixfmt:rgba tb:1/25 fr:25/1 sar:1521/1521 sws_param:flags=2
[auto_scaler_0 @ 0x7670240] w:iw h:ih flags:'bilinear' interl:0
[deinterlace_in_2_0 @ 0x766b680] auto-inserting filter 'auto_scaler_0' between the filter 'graph 0 input from stream 2:0' and the filter 'deinterlace_in_2_0'
[Parsed_scale_0 @ 0x733c1c0] w:1280 h:720 fmt:yuv420p sar:1/1 -> w:1920 h:1080 fmt:yuv420p sar:1/1 flags:0x2
[Parsed_pad_1 @ 0x733ce00] w:1920 h:1080 -> w:1920 h:1080 x:0 y:0 color:0x000000FF
[Parsed_setsar_2 @ 0x733da00] w:1920 h:1080 sar:1/1 dar:16/9 -> sar:1/1 dar:16/9
[auto_scaler_0 @ 0x7670240] w:701 h:190 fmt:rgba sar:1521/1521 -> w:701 h:190 fmt:yuva420p sar:1/1 flags:0x2
[Parsed_overlay_3 @ 0x733e440] main w:1920 h:1080 fmt:yuv420p overlay w:701 h:190 fmt:yuva420p
[Parsed_overlay_3 @ 0x733e440] [framesync @ 0x733e5a8] Selected 1/50 time base
[Parsed_overlay_3 @ 0x733e440] [framesync @ 0x733e5a8] Sync level 2
[libx264 @ 0x72c1c00] using SAR=1/1
[libx264 @ 0x72c1c00] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0x72c1c00] profile Progressive High, level 4.0, 4:2:0, 8-bit
[libx264 @ 0x72c1c00] 264 - core 157 r2969 d4099dd - H.264/MPEG-4 AVC codec - Copyleft 2003-2019 - http://www.videolan.org/x264.html - options: cabac=1 ref=1 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=2 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=0 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=0 threads=9 lookahead_threads=3 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=1 keyint=60 keyint_min=6 scenecut=40 intra_refresh=0 rc_lookahead=10 rc=abr mbtree=1 bitrate=4500 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, flv, to 'pipe:':
 Metadata:
 major_brand : mp42
 minor_version : 0
 compatible_brands: mp42mp41isomavc1
 encoder : Lavf58.20.100
 Stream #0:0: Video: h264 (libx264), 1 reference frame ([7][0][0][0] / 0x0007), yuv420p, 1920x1080 [SAR 1:1 DAR 16:9], q=-1--1, 4500 kb/s, 30 fps, 1k tbn, 30 tbc (default)
 Metadata:
 encoder : Lavc58.35.100 libx264
 Side data:
 cpb: bitrate max/min/avg: 0/0/4500000 buffer size: 0 vbv_delay: -1
 Stream #0:1: Audio: mp3 ([2][0][0][0] / 0x0002), 44100 Hz, stereo, fltp, 320 kb/s
 Metadata:
 encoder : Lavc58.97
frame= 27 fps=0.0 q=32.0 size= 247kB time=00:00:00.03 bitrate=59500.0kbits/s speed=0.0672x
frame= 77 fps= 77 q=27.0 size= 1115kB time=00:00:02.03 bitrate=4478.0kbits/s speed=2.03x
frame= 126 fps= 83 q=25.0 size= 2302kB time=00:00:04.00 bitrate=4712.4kbits/s speed=2.64x
frame= 177 fps= 87 q=26.0 size= 3576kB time=00:00:06.03 bitrate=4854.4kbits/s speed=2.97x
frame= 225 fps= 88 q=25.0 size= 4910kB time=00:00:07.96 bitrate=5047.8kbits/s speed=3.13x
frame= 272 fps= 89 q=27.0 size= 6189kB time=00:00:09.84 bitrate=5147.9kbits/s speed=3.22x
frame= 320 fps= 90 q=27.0 size= 7058kB time=00:00:11.78 bitrate=4907.5kbits/s speed=3.31x
frame= 372 fps= 91 q=26.0 size= 8098kB time=00:00:13.84 bitrate=4791.0kbits/s speed=3.4x



And that's the end of it. It should continue processing until 00:04:02, as that's my audio's length, but it stops here every time (this is approximately my video's length).

The relevant code, which works correctly:


ffmpeg_cmd = '/opt/bin/ffmpeg -stream_loop -1 -i "' + '/tmp/' + s3_source_key + '" -i /opt/bin/audio.mp3 -i /opt/bin/watermark.png -shortest -y -deinterlace -vcodec libx264 -pix_fmt yuv420p -preset veryfast -r 30 -g 60 -b:v 4500k -c:a copy -map 0:v:0 -map 1:a:0 -filter_complex scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,overlay=(W-w)/2:(H-h)/2,format=yuv420p -loglevel verbose -f flv -'
command1 = shlex.split(ffmpeg_cmd)
p1 = subprocess.Popen(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p1.communicate()
print(p1.returncode) #prints 0



With this code it repeats the video as many times as needed so that it is as long as the audio.


Both versions work correctly on my computer.


This question is almost the same but in my case FFmpeg is able to access the signed URL.