
Other articles (49)
-
(De)activating features (plugins)
18 February 2011. To manage the addition and removal of extra features (plugins), MediaSPIP uses SVP as of version 0.2.
SVP makes it easy to activate plugins from the MediaSPIP configuration area.
To access it, go to the configuration area and open the "Plugin management" page.
MediaSPIP ships by default with the full set of so-called "compatible" plugins; they have been tested and integrated so as to work perfectly with each (...)
-
Enabling visitor registration
12 April 2011. It is also possible to enable visitor registration, which lets anyone open an account on the channel in question, for open projects for example.
To do so, go to the site's configuration area and choose the "User management" submenu. The first form shown corresponds to this feature.
By default, during initialization MediaSPIP created a menu item in the top menu of the page leading (...)
-
Publishing on MediaSPIP
13 June 2013. Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or higher. If needed, contact the administrator of your MediaSPIP to find out.
On other sites (7850)
-
How to mux raw PCM with H.264 into MP4 with the ffmpeg API?
5 January 2019, by Oliver.Wong. I'm working on real-time multimedia transport with the FFmpeg API. Because the audio and video data are transported separately, I need to mux them into an MP4 file on the receiver side.
I can now mux PCM and H.264 into an MP4 file, but on playback only the video appears; the audio does not play.
The raw PCM attributes are: 8000 Hz sample rate, mono, 16-bit. Can anybody give me some advice? Thanks a lot.
I've tried adding a WAV header to in_pcm_file and confirmed that the PCM file plays in Windows Media Player, but the audio still doesn't work in the MP4 file.
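One thing worth checking: a headerless PCM file gives avformat_open_input() nothing to probe, so the stream parameters have to be forced. A minimal sketch, assuming the raw s16le demuxer and the 8 kHz mono source described above (the file name is a placeholder):

#include <libavformat/avformat.h>

AVFormatContext *actx = NULL;
AVInputFormat *pcm_fmt = av_find_input_format("s16le");  // raw signed 16-bit little-endian PCM
AVDictionary *opts = NULL;
av_dict_set(&opts, "sample_rate", "8000", 0);  // match the 8 kHz mono 16-bit source
av_dict_set(&opts, "channels", "1", 0);
int err = avformat_open_input(&actx, "audio.pcm", pcm_fmt, &opts);  // "audio.pcm" is a placeholder path
av_dict_free(&opts);

Without this, avformat_find_stream_info() on the bare PCM file cannot determine the stream parameters, which matches the commented-out error handling in the code below.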
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>

int muxerData(char *in_h264_file,
char *in_pcm_file,
char *out_mp4_file,
char *angle)
{
AVFormatContext *ifmt_ctx_v = NULL;
AVFormatContext *ifmt_ctx_a = NULL;
AVFormatContext *ofmt_ctx = NULL;
AVFormatContext *ifmt_ctx = NULL;
AVOutputFormat *ofmt = NULL;
AVPacket pkt = {0};
AVCodec *dec = NULL;
AVStream *in_stream = NULL;
AVStream *out_stream = NULL;
int ret = 0;
unsigned int i = 0;
int videoindex_v = -1;
int videoindex_out = -1;
int audioindex_a = -1;
int audioindex_out = -1;
int frame_index = 0;
int64_t cur_pts_v = 0;
int64_t cur_pts_a = 0;
int stream_index = 0;
int compare_tag = -1;
char log_buf[1024] = {0};
avcodec_register_all();
av_register_all();
//Input
if ((ret = avformat_open_input(&ifmt_ctx_a, in_pcm_file, NULL, NULL)) < 0)
{
av_strerror(ret, log_buf, 1024);
printf("Couldn't open file %s: %d(%s)", in_pcm_file, ret, log_buf);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0)
{
printf("Failed to retrieve input stream information");
//if (acc_length>0)
// goto end;
}
if ((ret = avformat_open_input(&ifmt_ctx_v, in_h264_file, NULL, NULL)) < 0)
{
printf("Could not open input file:%d\n", ret);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0)
{
printf("Failed to retrieve input stream information");
goto end;
}
av_dump_format(ifmt_ctx_v, 0, in_h264_file, 0);
av_dump_format(ifmt_ctx_a, 0, in_pcm_file, 0);
//Output
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_mp4_file);
if (!ofmt_ctx)
{
printf("Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt = ofmt_ctx->oformat;
for (i = 0; i < ifmt_ctx_v->nb_streams; i++)
{
//Create output AVStream according to input AVStream
if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
in_stream = ifmt_ctx_v->streams[i];
out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
videoindex_v = i;
if (!out_stream)
{
printf("Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
videoindex_out = out_stream->index;
//Copy the settings of AVCodecContext
ret = av_dict_set(&out_stream->metadata, "rotate", angle, 0);
if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0)
{
printf("Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
for (i = 0; i < ifmt_ctx_a->nb_streams; i++)
{
printf("===========acc=====from======:%d\n", ifmt_ctx_a->nb_streams);
//Create output AVStream according to input AVStream
if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
in_stream = ifmt_ctx_a->streams[i];
out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
audioindex_a = i;
if (!out_stream)
{
printf("Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
audioindex_out = out_stream->index;
//Copy the settings of AVCodecContext
if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0)
{
printf("Failed to copy context from input to output stream codec context\n");
goto end;
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
printf("==========Output Information==========\n");
av_dump_format(ofmt_ctx, 0, out_mp4_file, 1);
printf("======================================\n");
//Open output file
if (!(ofmt->flags & AVFMT_NOFILE))
{
if (avio_open(&ofmt_ctx->pb, out_mp4_file, AVIO_FLAG_WRITE) < 0)
{
printf("Could not open output file '%s'", out_mp4_file);
goto end;
}
}
//Write file header
int header_ret = avformat_write_header(ofmt_ctx, NULL);
if (header_ret < 0)
{
av_strerror(header_ret, log_buf, 1024);
printf("Error occurred when opening output file:%d (%s)\n", header_ret, log_buf);
goto end;
}
//FIX
//AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
//AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
while (1)
{
//Get an AVPacket
//if (acc_length>0)
//{
compare_tag = av_compare_ts(cur_pts_v,
ifmt_ctx_v->streams[videoindex_v]->time_base,
cur_pts_a,
ifmt_ctx_a->streams[audioindex_a]->time_base);
//}
if (compare_tag <= 0)
{
ifmt_ctx = ifmt_ctx_v;
stream_index = videoindex_out;
if (av_read_frame(ifmt_ctx, &pkt) >= 0)
{
do
{
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[stream_index];
if (pkt.stream_index == videoindex_v)
{
//FIX No PTS (Example: Raw H.264)
//Simple Write PTS
if (pkt.pts == AV_NOPTS_VALUE)
{
//Write PTS
AVRational time_base1 = in_stream->time_base;
//Duration between 2 frames (us)
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
//Parameters
pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
pkt.dts = pkt.pts;
pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
frame_index++;
}
cur_pts_v = pkt.pts;
break;
}
} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
}
else
{
break;
}
}
else
{
ifmt_ctx = ifmt_ctx_a;
stream_index = audioindex_out;
if (av_read_frame(ifmt_ctx, &pkt) >= 0)
{
do
{
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[stream_index];
if (pkt.stream_index == audioindex_a)
{
//FIX No PTS
//Simple Write PTS
if (pkt.pts == AV_NOPTS_VALUE)
{
//Write PTS
AVRational time_base1 = in_stream->time_base;
//Duration between 2 frames (us)
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
//Parameters
pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
pkt.dts = pkt.pts;
pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
frame_index++;
}
cur_pts_a = pkt.pts;
break;
}
} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
}
else
{
break;
}
}
//FIX:Bitstream Filter
//av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
//av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
//Convert PTS/DTS
pkt.pts = av_rescale_q_rnd(pkt.pts,
in_stream->time_base,
out_stream->time_base,
(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts,
in_stream->time_base,
out_stream->time_base,
(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
pkt.stream_index = stream_index;
printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0)
{
printf("Error muxing packet\n");
break;
}
av_free_packet(&pkt);
}
//Write file trailer
av_write_trailer(ofmt_ctx);
//av_bitstream_filter_close(h264bsfc);
//av_bitstream_filter_close(aacbsfc);
end:
avformat_close_input(&ifmt_ctx_v);
avformat_close_input(&ifmt_ctx_a);
/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_close(ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
if ((ret < 0) && (ret != AVERROR_EOF))
{
printf("Error occurred.\n");
return -1;
}
printf("======muxer mp4 success =====!\n");
return 0;
}
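For context, MP4 has no widely supported mapping for raw PCM tracks, which may be why players stay silent even though muxing succeeds. A common approach is to encode the PCM to AAC before muxing; a minimal sketch of setting up such an encoder (the bit rate and the helper name make_aac_encoder are illustrative assumptions):

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

// Sketch: allocate and open an AAC encoder for 8 kHz mono input.
AVCodecContext *make_aac_encoder(void)
{
    AVCodec *enc = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!enc)
        return NULL;
    AVCodecContext *c = avcodec_alloc_context3(enc);
    if (!c)
        return NULL;
    c->sample_rate    = 8000;
    c->channels       = 1;
    c->channel_layout = AV_CH_LAYOUT_MONO;
    c->sample_fmt     = enc->sample_fmts ? enc->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
    c->bit_rate       = 32000;                 // illustrative choice
    c->time_base      = (AVRational){1, 8000};
    if (avcodec_open2(c, enc, NULL) < 0) {
        avcodec_free_context(&c);
        return NULL;
    }
    return c;
}

The raw s16 samples would then be converted to the encoder's sample format (FLTP for the native AAC encoder) with libswresample before being fed to the encoder.
-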
Use the ffmpeg API to convert audio files: crash on avcodec_encode_audio2
17 February 2014, by fabian. From the examples I got the basic idea of this code.
However, I am not sure what I am missing, as muxing.c, demuxing.c, and decoding_encoding.c all use different approaches.

The process of converting an audio file to another file should go roughly like this:
inputfile -demux-> audiostream -read-> inPackets -decode2frames-> frames -encode2packets-> outPackets -write-> audiostream -mux-> outputfile

However, I found the following comment in demuxing.c:
/* Write the raw audio data samples of the first plane. This works
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
* most audio decoders output planar audio, which uses a separate
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
* In other words, this code will write only the first audio channel
* in these cases.
* You should use libswresample or libavfilter to convert the frame
* to packed data. */

My questions about this are:
-
Can I expect a frame retrieved by one of the decoder functions, e.g.
avcodec_decode_audio4, to hold values suitable for feeding directly into an encoder, or is
the resampling step mentioned in the comment mandatory?
-
Am I taking the right approach? ffmpeg is very asymmetric; i.e., if there is a function
open_file_for_input there might not be a function open_file_for_output. Also, there are different versions of many functions (avcodec_decode_audio[1-4]) and different naming
schemes, so it's very hard to tell whether the general approach is right, or actually an
ugly mixture of techniques that were used at different version bumps of ffmpeg.
-
ffmpeg uses a lot of specific terms, like "planar sampling" or "packed format", and I am having a hard time finding definitions for these terms. Is it possible to write working code without deep knowledge of audio?
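For reference: "packed" (interleaved) formats store all channels in one buffer (L R L R ...), while "planar" formats keep one buffer per channel in frame->extended_data[ch]. A minimal sketch, assuming a decoded stereo FLTP frame converted to packed S16 with libswresample (rates and layouts are illustrative):

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

// Convert one decoded planar frame to an interleaved s16 buffer.
SwrContext *swr = swr_alloc_set_opts(NULL,
    AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  44100,   // output: packed
    AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 44100,   // input: planar
    0, NULL);
swr_init(swr);

uint8_t *out_buf = NULL;
av_samples_alloc(&out_buf, NULL, 2, frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
int out_samples = swr_convert(swr, &out_buf, frame->nb_samples,
                              (const uint8_t **)frame->extended_data,
                              frame->nb_samples);
// ... use out_buf, then:
av_freep(&out_buf);
swr_free(&swr);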
Here is my code so far; it currently crashes at avcodec_encode_audio2, and I don't know why.

#include <jni.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/timestamp.h>

int Java_com_fscz_ffmpeg_Audio_convert(JNIEnv *env, jobject this, jstring jformat, jstring jcodec, jstring jsource, jstring jdest) {
jboolean isCopy;
int error;
jclass configClass = (*env)->FindClass(env, "com/fscz/ffmpeg/Config");  // JNI class names use '/' separators
jfieldID fid = (*env)->GetStaticFieldID(env, configClass, "ffmpeg_logging", "I");
logging = (*env)->GetStaticIntField(env, configClass, fid);
/// open input
const char* sourceFile = (*env)->GetStringUTFChars(env, jsource, &isCopy);
AVFormatContext* pInputCtx;
AVStream* pInputStream;
open_input(sourceFile, &pInputCtx, &pInputStream);
// open output
const char* destFile = (*env)->GetStringUTFChars(env, jdest, &isCopy);
const char* cformat = (*env)->GetStringUTFChars(env, jformat, &isCopy);
const char* ccodec = (*env)->GetStringUTFChars(env, jcodec, &isCopy);
AVFormatContext* pOutputCtx;
AVOutputFormat* pOutputFmt;
AVStream* pOutputStream;
open_output(cformat, ccodec, destFile, &pOutputCtx, &pOutputFmt, &pOutputStream);
/// decode/encode
error = avformat_write_header(pOutputCtx, NULL);
DIE_IF_LESS_ZERO(error, "error writing output stream header to file: %s, error: %s", destFile, e2s(error));
AVFrame* frame = avcodec_alloc_frame();
DIE_IF_UNDEFINED(frame, "Could not allocate audio frame");
frame->pts = 0;
LOGI("allocate packet");
AVPacket pktIn;
AVPacket pktOut;
LOGI("done");
int got_frame, got_packet, len, frame_count = 0;
int64_t processed_time = 0, duration = pInputStream->duration;
while (av_read_frame(pInputCtx, &pktIn) >= 0) {
do {
len = avcodec_decode_audio4(pInputStream->codec, frame, &got_frame, &pktIn);
DIE_IF_LESS_ZERO(len, "Error decoding frame: %s", e2s(len));
if (len < 0) break;
len = FFMIN(len, pktIn.size);
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
LOGI("audio_frame n:%d nb_samples:%d pts:%s\n", frame_count++, frame->nb_samples, av_ts2timestr(frame->pts, &(pInputStream->codec->time_base)));
if (got_frame) {
do {
av_init_packet(&pktOut);
pktOut.data = NULL;
pktOut.size = 0;
LOGI("encode frame");
DIE_IF_UNDEFINED(pOutputStream->codec, "no output codec");
DIE_IF_UNDEFINED(frame->nb_samples, "no nb samples");
DIE_IF_UNDEFINED(pOutputStream->codec->internal, "no internal");
LOGI("tests done");
len = avcodec_encode_audio2(pOutputStream->codec, &pktOut, frame, &got_packet);
LOGI("encode done");
DIE_IF_LESS_ZERO(len, "Error (re)encoding frame: %s", e2s(len));
} while (!got_packet);
// write packet;
LOGI("write packet");
/* Write the compressed frame to the media file. */
error = av_interleaved_write_frame(pOutputCtx, &pktOut);
DIE_IF_LESS_ZERO(error, "Error while writing audio frame: %s", e2s(error));
av_free_packet(&pktOut);
}
pktIn.data += len;
pktIn.size -= len;
} while (pktIn.size > 0);
av_free_packet(&pktIn);
}
LOGI("write trailer");
av_write_trailer(pOutputCtx);
LOGI("end");
/// close resources
avcodec_free_frame(&frame);
avcodec_close(pInputStream->codec);
av_free(pInputStream->codec);
avcodec_close(pOutputStream->codec);
av_free(pOutputStream->codec);
avformat_close_input(&pInputCtx);
avformat_free_context(pOutputCtx);
return 0;
} -
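A hedged guess at the crash, given the demuxing.c comment quoted above: avcodec_encode_audio2() expects the frame's sample format, channel layout, and sample rate to match the opened encoder context, and most decoders emit planar frames. A defensive check before the encode call might look like this (sketch only):

// Sketch: verify decoder output matches encoder expectations before encoding.
if (frame->format         != pOutputStream->codec->sample_fmt     ||
    frame->channel_layout != pOutputStream->codec->channel_layout ||
    frame->sample_rate    != pOutputStream->codec->sample_rate) {
    // Mismatch: convert with libswresample (see the sketch above)
    // instead of passing the frame straight to avcodec_encode_audio2().
}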
-
RTSP client cannot play video
13 November 2018, by Harshil Makwana. I added and updated the functions below inside the ffserver code in the ffmpeg source:
AVPacket *dataPacket;
void setAVPacket(AVPacket *packet)
{
if (packet && packet->data)
{
pthread_mutex_lock(&lock);
if (isSend == 1)
{
dataPacket = packet;
}
else
{
if (packet != NULL)
{
av_packet_unref(packet);
free(packet);
packet = NULL;
}
}
pthread_mutex_unlock(&lock);
    }
}
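/* Side note (a sketch, not part of the original post): dataPacket stores
 * the caller's pointer, so the packet must remain valid until the sender
 * thread has consumed it. A safer pattern is to keep a private copy, e.g.
 * dataPacket = av_packet_clone(packet), and av_packet_free() it after use. */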
static int http_prepare_data(HTTPContext *c)
{
int i, len, ret;
AVFormatContext *ctx;
av_freep(&c->pb_buffer);
switch(c->state) {
case HTTPSTATE_SEND_DATA_HEADER:
ctx = avformat_alloc_context();
if (!ctx)
return AVERROR(ENOMEM);
c->pfmt_ctx = ctx;
av_dict_copy(&(c->pfmt_ctx->metadata), c->stream->metadata, 0);
for(i=0;i<c->stream->nb_streams;i++) {
LayeredAVStream *src;
AVStream *st = avformat_new_stream(c->pfmt_ctx, NULL);
if (!st)
return AVERROR(ENOMEM);
/* if file or feed, then just take streams from FFServerStream
* struct */
if (!c->stream->feed ||
c->stream->feed == c->stream)
src = c->stream->streams[i];
else
src = c->stream->feed->streams[c->stream->feed_streams[i]];
unlayer_stream(c->pfmt_ctx->streams[i], src); //TODO we no longer copy st->internal, does this matter?
av_assert0(!c->pfmt_ctx->streams[i]->priv_data);
if (src->codec->flags & AV_CODEC_FLAG_BITEXACT)
c->pfmt_ctx->flags |= AVFMT_FLAG_BITEXACT;
}
/* set output format parameters */
c->pfmt_ctx->oformat = c->stream->fmt;
av_assert0(c->pfmt_ctx->nb_streams == c->stream->nb_streams);
c->got_key_frame = 0;
/* prepare header and save header data in a stream */
if (avio_open_dyn_buf(&c->pfmt_ctx->pb) < 0) {
/* XXX: potential leak */
return -1;
}
c->pfmt_ctx->pb->seekable = 0;
/*
* HACK to avoid MPEG-PS muxer to spit many underflow errors
* Default value from FFmpeg
* Try to set it using configuration option
*/
c->pfmt_ctx->max_delay = (int)(0.7*AV_TIME_BASE);
if ((ret = avformat_write_header(c->pfmt_ctx, NULL)) < 0) {
http_log("Error writing output header for stream '%s': %s\n",
c->stream->filename, av_err2str(ret));
return ret;
}
av_dict_free(&c->pfmt_ctx->metadata);
len = avio_close_dyn_buf(c->pfmt_ctx->pb, &c->pb_buffer);
c->buffer_ptr = c->pb_buffer;
c->buffer_end = c->pb_buffer + len;
c->state = HTTPSTATE_SEND_DATA;
c->last_packet_sent = 0;
break;
case HTTPSTATE_SEND_DATA:
/* find a new packet */
/* read a packet from the input stream */
if (c->stream->feed)
ffm_set_write_index(c->fmt_in,
c->stream->feed->feed_write_index,
c->stream->feed->feed_size);
if (c->stream->max_time &&
c->stream->max_time + c->start_time - cur_time < 0)
/* We have timed out */
c->state = HTTPSTATE_SEND_DATA_TRAILER;
else {
AVPacket pkt;
redo:
ret = av_read_frame(c->fmt_in, &pkt);
if (ret < 0) {
if (c->stream->feed) {
/* if coming from feed, it means we reached the end of the
* ffm file, so must wait for more data */
c->state = HTTPSTATE_WAIT_FEED;
return 1; /* state changed */
}
if (ret == AVERROR(EAGAIN)) {
/* input not ready, come back later */
return 0;
}
if (c->stream->loop) {
avformat_close_input(&c->fmt_in);
if (open_input_stream(c, "") < 0)
goto no_loop;
goto redo;
} else {
no_loop:
/* must send trailer now because EOF or error */
c->state = HTTPSTATE_SEND_DATA_TRAILER;
}
} else {
int source_index = pkt.stream_index;
/* update first pts if needed */
if (c->first_pts == AV_NOPTS_VALUE && pkt.dts != AV_NOPTS_VALUE) {
c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, AV_TIME_BASE_Q);
c->start_time = cur_time;
}
/* send it to the appropriate stream */
if (c->stream->feed) {
/* if coming from a feed, select the right stream */
if (c->switch_pending) {
c->switch_pending = 0;
for(i=0;i<c->stream->nb_streams;i++) {
if (c->switch_feed_streams[i] == pkt.stream_index)
if (pkt.flags & AV_PKT_FLAG_KEY)
c->switch_feed_streams[i] = -1;
if (c->switch_feed_streams[i] >= 0)
c->switch_pending = 1;
}
}
for(i=0;i<c->stream->nb_streams;i++) {
if (c->stream->feed_streams[i] == pkt.stream_index) {
AVStream *st = c->fmt_in->streams[source_index];
pkt.stream_index = i;
if (pkt.flags & AV_PKT_FLAG_KEY &&
(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
c->stream->nb_streams == 1))
c->got_key_frame = 1;
if (!c->stream->send_on_key || c->got_key_frame)
goto send_it;
}
}
} else {
AVStream *ist, *ost;
send_it:
ist = c->fmt_in->streams[source_index];
/* specific handling for RTP: we use several
* output streams (one for each RTP connection).
* XXX: need more abstract handling */
if (c->is_packetized) {
/* compute send time and duration */
if (pkt.dts != AV_NOPTS_VALUE) {
c->cur_pts = av_rescale_q(pkt.dts, ist->time_base, AV_TIME_BASE_Q);
c->cur_pts -= c->first_pts;
}
c->cur_frame_duration = av_rescale_q(pkt.duration, ist->time_base, AV_TIME_BASE_Q);
/* find RTP context */
c->packet_stream_index = pkt.stream_index;
ctx = c->rtp_ctx[c->packet_stream_index];
if(!ctx) {
av_packet_unref(&pkt);
break;
}
/* only one stream per RTP connection */
pkt.stream_index = 0;
} else {
ctx = c->pfmt_ctx;
/* Fudge here */
}
if (c->is_packetized) {
int max_packet_size;
if (c->rtp_protocol == RTSP_LOWER_TRANSPORT_TCP)
max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
else
max_packet_size = c->rtp_handles[c->packet_stream_index]->max_packet_size;
ret = ffio_open_dyn_packet_buf(&ctx->pb,
max_packet_size);
} else
ret = avio_open_dyn_buf(&ctx->pb);
if (ret < 0) {
/* XXX: potential leak */
return -1;
}
ost = ctx->streams[pkt.stream_index];
ctx->pb->seekable = 0;
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, ist->time_base,
ost->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, ist->time_base,
ost->time_base);
pkt.duration = av_rescale_q(pkt.duration, ist->time_base,
ost->time_base);
if ((ret = av_write_frame(ctx, &pkt)) < 0) {
http_log("Error writing frame to output for stream '%s': %s\n",
c->stream->filename, av_err2str(ret));
c->state = HTTPSTATE_SEND_DATA_TRAILER;
}
av_freep(&c->pb_buffer);
len = avio_close_dyn_buf(ctx->pb, &c->pb_buffer);
ctx->pb = NULL;
c->cur_frame_bytes = len;
c->buffer_ptr = c->pb_buffer;
c->buffer_end = c->pb_buffer + len;
if (len == 0) {
av_packet_unref(&pkt);
goto redo;
}
}
av_packet_unref(&pkt);
}
}
break;
default:
case HTTPSTATE_SEND_DATA_TRAILER:
/* last packet test ? */
if (c->last_packet_sent || c->is_packetized)
return -1;
ctx = c->pfmt_ctx;
/* prepare header */
if (avio_open_dyn_buf(&ctx->pb) < 0) {
/* XXX: potential leak */
return -1;
}
c->pfmt_ctx->pb->seekable = 0;
av_write_trailer(ctx);
len = avio_close_dyn_buf(ctx->pb, &c->pb_buffer);
c->buffer_ptr = c->pb_buffer;
c->buffer_end = c->pb_buffer + len;
c->last_packet_sent = 1;
break;
}
return 0;
}

As you can see, there is a function named setAVPacket(), through which I pass my H.264-encoded packets to the RTSP server. The same AVPacket is consumed by the other function, http_prepare_data(), which is called when a PLAY request comes in.
After implementing the code above I can complete the RTSP handshake, and the server sends RTP packets to the client, but no player (I tried VLC and ffplay) can play the video.
Can you help me with this?
Many thanks in advance.
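For what it's worth, one common cause of this exact symptom (handshake and RTP flow look fine, but VLC and ffplay render nothing) is that the H.264 stream never delivers SPS/PPS to the client, either in-band or via sprop-parameter-sets in the SDP. A small sketch for checking whether an Annex B packet carries them (has_sps_pps is a hypothetical helper, not part of ffserver; only 4-byte start codes are scanned):

#include <libavcodec/avcodec.h>

/* Sketch: scan an Annex B packet for SPS (NAL type 7) or PPS (NAL type 8). */
static int has_sps_pps(const AVPacket *pkt)
{
    for (int i = 0; i + 4 < pkt->size; i++) {
        if (pkt->data[i] == 0 && pkt->data[i + 1] == 0 &&
            pkt->data[i + 2] == 0 && pkt->data[i + 3] == 1) {
            int nal_type = pkt->data[i + 4] & 0x1F;
            if (nal_type == 7 || nal_type == 8)
                return 1;
        }
    }
    return 0;
}

If key frames arriving through setAVPacket() never contain SPS/PPS, the encoder side should be configured to repeat its headers, or the parameter sets must be advertised in the SDP.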