diff --git a/operations/external/ff-load.c b/operations/external/ff-load.c
index 0d9a4f3fbef422a8ff548820ee8843e0a2ffa7ec..ec3b747f976a31cfabe811ba03fb7487839342f0 100644
--- a/operations/external/ff-load.c
+++ b/operations/external/ff-load.c
@@ -62,9 +62,11 @@ property_audio_fragment (audio, _("audio"), 0)
#include <limits.h>
#include <stdlib.h>
+#include <libavutil/channel_layout.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
+#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
@@ -74,11 +76,12 @@ typedef struct
gint height;
gdouble fps;
gint codec_delay;
+ int64_t first_dts;
gchar *loadedfilename; /* to remember which file is "cached" */
AVFormatContext *audio_fcontext;
- AVCodec *audio_codec;
+ const AVCodec *audio_codec;
int audio_index;
GList *audio_track;
long audio_cursor_pos;
@@ -90,8 +93,10 @@ typedef struct
AVFormatContext *video_fcontext;
int video_index;
AVStream *video_stream;
+ AVCodecContext *video_ctx;
AVStream *audio_stream;
- AVCodec *video_codec;
+ AVCodecContext *audio_ctx;
+ const AVCodec *video_codec;
AVFrame *lavc_frame;
AVFrame *rgb_frame;
glong prevframe; /* previously decoded frame number */
@@ -140,10 +145,8 @@ ff_cleanup (GeglProperties *o)
{
clear_audio_track (o);
g_free (p->loadedfilename);
- if (p->video_stream && p->video_stream->codec)
- avcodec_close (p->video_stream->codec);
- if (p->audio_stream && p->audio_stream->codec)
- avcodec_close (p->audio_stream->codec);
+ avcodec_free_context (&p->video_ctx);
+ avcodec_free_context (&p->audio_ctx);
if (p->video_fcontext)
avformat_close_input(&p->video_fcontext);
if (p->audio_fcontext)
@@ -202,14 +205,14 @@ decode_audio (GeglOperation *operation,
if (av_seek_frame (p->audio_fcontext, p->audio_stream->index, seek_target, (AVSEEK_FLAG_BACKWARD)) < 0)
fprintf (stderr, "audio seek error!\n");
else
- avcodec_flush_buffers (p->audio_stream->codec);
+ avcodec_flush_buffers (p->audio_ctx);
}
while (p->prevapts <= pts2)
{
AVPacket pkt = {0,};
- int decoded_bytes;
+ int ret;
if (av_read_frame (p->audio_fcontext, &pkt) < 0)
{
@@ -219,77 +222,93 @@ decode_audio (GeglOperation *operation,
if (pkt.stream_index==p->audio_index && p->audio_stream)
{
static AVFrame frame;
- int got_frame;
- decoded_bytes = avcodec_decode_audio4(p->audio_stream->codec,
- &frame, &got_frame, &pkt);
-
- if (decoded_bytes < 0)
+ ret = avcodec_send_packet (p->audio_ctx, &pkt);
+ if (ret < 0)
{
- fprintf (stderr, "avcodec_decode_audio4 failed for %s\n",
+ fprintf (stderr, "avcodec_send_packet failed for %s\n",
o->path);
}
-
- if (got_frame) {
- int samples_left = frame.nb_samples;
- int si = 0;
-
- while (samples_left)
+ while (ret == 0)
{
- int sample_count = samples_left;
- int channels = MIN(p->audio_stream->codecpar->channels, GEGL_MAX_AUDIO_CHANNELS);
- GeglAudioFragment *af = gegl_audio_fragment_new (o->audio_sample_rate, channels,
- AV_CH_LAYOUT_STEREO, samples_left);
-//);
- switch (p->audio_stream->codec->sample_fmt)
- {
- case AV_SAMPLE_FMT_FLT:
- for (gint i = 0; i < sample_count; i++)
- for (gint c = 0; c < channels; c++)
- af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c];
- break;
- case AV_SAMPLE_FMT_FLTP:
- for (gint i = 0; i < sample_count; i++)
- for (gint c = 0; c < channels; c++)
- {
- af->data[c][i] = ((float *)frame.data[c])[i + si];
- }
- break;
- case AV_SAMPLE_FMT_S16:
- for (gint i = 0; i < sample_count; i++)
- for (gint c = 0; c < channels; c++)
- af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c] / 32768.0;
- break;
- case AV_SAMPLE_FMT_S16P:
- for (gint i = 0; i < sample_count; i++)
- for (gint c = 0; c < channels; c++)
- af->data[c][i] = ((int16_t *)frame.data[c])[i + si] / 32768.0;
- break;
- case AV_SAMPLE_FMT_S32:
- for (gint i = 0; i < sample_count; i++)
- for (gint c = 0; c < channels; c++)
- af->data[c][i] = ((int32_t *)frame.data[0])[(i + si) * channels + c] / 2147483648.0;
+ ret = avcodec_receive_frame (p->audio_ctx, &frame);
+ if (ret == AVERROR(EAGAIN))
+ {
+ // no more frames; should send the next packet now
+ ret = 0;
break;
- case AV_SAMPLE_FMT_S32P:
- for (gint i = 0; i < sample_count; i++)
- for (gint c = 0; c < channels; c++)
- af->data[c][i] = ((int32_t *)frame.data[c])[i + si] / 2147483648.0;
+ }
+ else if (ret < 0)
+ {
+ fprintf (stderr, "avcodec_receive_frame failed for %s\n",
+ o->path);
break;
- default:
- g_warning ("undealt with sample format\n");
}
- gegl_audio_fragment_set_sample_count (af, sample_count);
- gegl_audio_fragment_set_pos (af,
- (long int)av_rescale_q ((pkt.pts), p->audio_stream->time_base, AV_TIME_BASE_Q) * o->audio_sample_rate /AV_TIME_BASE);
-
- p->audio_pos += sample_count;
- p->audio_track = g_list_append (p->audio_track, af);
-
- samples_left -= sample_count;
- si += sample_count;
- }
- p->prevapts = pkt.pts * av_q2d (p->audio_stream->time_base);
- }
+ int samples_left = frame.nb_samples;
+ int si = 0;
+
+ while (samples_left)
+ {
+ int sample_count = samples_left;
+ int channels = MIN(p->audio_stream->codecpar->channels, GEGL_MAX_AUDIO_CHANNELS);
+ GeglAudioFragment *af = gegl_audio_fragment_new (o->audio_sample_rate, channels,
+ AV_CH_LAYOUT_STEREO, samples_left);
+ //);
+ switch (p->audio_ctx->sample_fmt)
+ {
+ case AV_SAMPLE_FMT_FLT:
+ for (gint i = 0; i < sample_count; i++)
+ for (gint c = 0; c < channels; c++)
+ af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c];
+ break;
+ case AV_SAMPLE_FMT_FLTP:
+ for (gint i = 0; i < sample_count; i++)
+ for (gint c = 0; c < channels; c++)
+ {
+ af->data[c][i] = ((float *)frame.data[c])[i + si];
+ }
+ break;
+ case AV_SAMPLE_FMT_S16:
+ for (gint i = 0; i < sample_count; i++)
+ for (gint c = 0; c < channels; c++)
+ af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c] / 32768.0;
+ break;
+ case AV_SAMPLE_FMT_S16P:
+ for (gint i = 0; i < sample_count; i++)
+ for (gint c = 0; c < channels; c++)
+ af->data[c][i] = ((int16_t *)frame.data[c])[i + si] / 32768.0;
+ break;
+ case AV_SAMPLE_FMT_S32:
+ for (gint i = 0; i < sample_count; i++)
+ for (gint c = 0; c < channels; c++)
+ af->data[c][i] = ((int32_t *)frame.data[0])[(i + si) * channels + c] / 2147483648.0;
+ break;
+ case AV_SAMPLE_FMT_S32P:
+ for (gint i = 0; i < sample_count; i++)
+ for (gint c = 0; c < channels; c++)
+ af->data[c][i] = ((int32_t *)frame.data[c])[i + si] / 2147483648.0;
+ break;
+ default:
+ g_warning ("undealt with sample format\n");
+ }
+ gegl_audio_fragment_set_sample_count (af, sample_count);
+ gegl_audio_fragment_set_pos (
+ af,
+ (long int)av_rescale_q (
+ (pkt.pts),
+ p->audio_stream->time_base,
+ AV_TIME_BASE_Q
+ ) * o->audio_sample_rate / AV_TIME_BASE
+ );
+
+ p->audio_pos += sample_count;
+ p->audio_track = g_list_append (p->audio_track, af);
+
+ samples_left -= sample_count;
+ si += sample_count;
+ }
+ p->prevapts = pkt.pts * av_q2d (p->audio_stream->time_base);
+ }
}
av_packet_unref (&pkt);
}
@@ -325,12 +344,12 @@ decode_frame (GeglOperation *operation,
if (frame < 2 || frame > prevframe + 64 || frame < prevframe )
{
int64_t seek_target = av_rescale_q (((frame) * AV_TIME_BASE * 1.0) / o->frame_rate
-, AV_TIME_BASE_Q, p->video_stream->time_base) / p->video_stream->codec->ticks_per_frame;
+, AV_TIME_BASE_Q, p->video_stream->time_base) / p->video_ctx->ticks_per_frame;
if (av_seek_frame (p->video_fcontext, p->video_index, seek_target, (AVSEEK_FLAG_BACKWARD )) < 0)
fprintf (stderr, "video seek error!\n");
else
- avcodec_flush_buffers (p->video_stream->codec);
+ avcodec_flush_buffers (p->video_ctx);
prevframe = -1;
}
@@ -340,7 +359,7 @@ decode_frame (GeglOperation *operation,
int got_picture = 0;
do
{
- int decoded_bytes;
+ int ret;
AVPacket pkt = {0,};
do
@@ -354,33 +373,52 @@ decode_frame (GeglOperation *operation,
}
while (pkt.stream_index != p->video_index);
- decoded_bytes = avcodec_decode_video2 (
- p->video_stream->codec, p->lavc_frame,
- &got_picture, &pkt);
- if (decoded_bytes < 0)
+ ret = avcodec_send_packet (p->video_ctx, &pkt);
+ if (ret < 0)
{
- fprintf (stderr, "avcodec_decode_video failed for %s\n",
+ fprintf (stderr, "avcodec_send_packet failed for %s\n",
o->path);
return -1;
}
-
- if(got_picture)
- {
- if ((pkt.dts == pkt.pts) || (p->lavc_frame->key_frame!=0))
- {
- p->lavc_frame->pts = (p->video_stream->cur_dts -
- p->video_stream->first_dts);
- p->prevpts = av_rescale_q (p->lavc_frame->pts,
- p->video_stream->time_base,
- AV_TIME_BASE_Q) * 1.0 / AV_TIME_BASE;
- decodeframe = roundf( p->prevpts * o->frame_rate);
- }
- else
- {
- p->prevpts += 1.0 / o->frame_rate;
- decodeframe = roundf ( p->prevpts * o->frame_rate);
- }
- }
+ while (ret == 0)
+ {
+ if (!p->first_dts)
+ p->first_dts = pkt.dts;
+ ret = avcodec_receive_frame (p->video_ctx, p->lavc_frame);
+ if (ret == AVERROR(EAGAIN))
+ {
+ // no more frames; should send the next packet now
+ ret = 0;
+ break;
+ }
+ else if (ret < 0)
+ {
+ fprintf (stderr, "avcodec_receive_frame failed for %s\n",
+ o->path);
+ break;
+ }
+ got_picture = 1;
+ if ((pkt.dts == pkt.pts) || (p->lavc_frame->key_frame!=0))
+ {
+ // cur_dts and first_dts are moved to libavformat/internal.h
+ /*
+ p->lavc_frame->pts = (p->video_stream->cur_dts -
+ p->video_stream->first_dts);
+ */
+ p->lavc_frame->pts = pkt.dts - p->first_dts;
+ p->prevpts = av_rescale_q (p->lavc_frame->pts,
+ p->video_stream->time_base,
+ AV_TIME_BASE_Q) * 1.0 / AV_TIME_BASE;
+ decodeframe = roundf( p->prevpts * o->frame_rate);
+ }
+ else
+ {
+ p->prevpts += 1.0 / o->frame_rate;
+ decodeframe = roundf ( p->prevpts * o->frame_rate);
+ }
+ if (decodeframe > frame + p->codec_delay)
+ break;
+ }
#if 0
if (decoded_bytes != pkt.size)
fprintf (stderr, "bytes left!\n");
@@ -429,6 +467,7 @@ prepare (GeglOperation *operation)
if (err < 0)
{
print_error (o->path, err);
+ return;
}
err = avformat_find_stream_info (p->video_fcontext, NULL);
if (err < 0)
@@ -440,6 +479,7 @@ prepare (GeglOperation *operation)
if (err < 0)
{
print_error (o->path, err);
+ return;
}
err = avformat_find_stream_info (p->audio_fcontext, NULL);
if (err < 0)
@@ -467,16 +507,26 @@ prepare (GeglOperation *operation)
{
p->video_codec = avcodec_find_decoder (p->video_stream->codecpar->codec_id);
if (p->video_codec == NULL)
- g_warning ("video codec not found");
- p->video_stream->codec->err_recognition = AV_EF_IGNORE_ERR |
+ {
+ g_warning ("video codec not found");
+ p->video_ctx = NULL;
+ return;
+ }
+ p->video_ctx = avcodec_alloc_context3 (p->video_codec);
+ if (avcodec_parameters_to_context (p->video_ctx, p->video_stream->codecpar) < 0)
+ {
+ fprintf (stderr, "cannot copy video codec parameters\n");
+ return;
+ }
+ p->video_ctx->err_recognition = AV_EF_IGNORE_ERR |
AV_EF_BITSTREAM |
AV_EF_BUFFER;
- p->video_stream->codec->workaround_bugs = FF_BUG_AUTODETECT;
+ p->video_ctx->workaround_bugs = FF_BUG_AUTODETECT;
- if (avcodec_open2 (p->video_stream->codec, p->video_codec, NULL) < 0)
+ if (avcodec_open2 (p->video_ctx, p->video_codec, NULL) < 0)
{
- g_warning ("error opening codec %s", p->video_stream->codec->codec->name);
+ g_warning ("error opening codec %s", p->video_ctx->codec->name);
return;
}
}
@@ -485,10 +535,20 @@ prepare (GeglOperation *operation)
{
p->audio_codec = avcodec_find_decoder (p->audio_stream->codecpar->codec_id);
if (p->audio_codec == NULL)
- g_warning ("audio codec not found");
- else if (avcodec_open2 (p->audio_stream->codec, p->audio_codec, NULL) < 0)
{
- g_warning ("error opening codec %s", p->audio_stream->codec->codec->name);
+ g_warning ("audio codec not found");
+ p->audio_ctx = NULL;
+ return;
+ }
+ p->audio_ctx = avcodec_alloc_context3 (p->audio_codec);
+ if (avcodec_parameters_to_context (p->audio_ctx, p->audio_stream->codecpar) < 0)
+ {
+ fprintf (stderr, "cannot copy audio codec parameters\n");
+ return;
+ }
+ if (avcodec_open2 (p->audio_ctx, p->audio_codec, NULL) < 0)
+ {
+ g_warning ("error opening codec %s", p->audio_ctx->codec->name);
}
else
{
@@ -544,7 +604,7 @@ prepare (GeglOperation *operation)
fprintf (stdout, "duration: %02i:%02i:%02i\n", h, m, s);
}
#endif
- p->codec_delay = p->video_stream->codec->delay;
+ p->codec_delay = p->video_ctx->delay;
if (!strcmp (o->video_codec, "mpeg1video"))
p->codec_delay = 1;
@@ -736,7 +796,7 @@ process (GeglOperation *operation,
if (p->video_stream == NULL)
return TRUE;
- if (p->video_stream->codec->pix_fmt == AV_PIX_FMT_RGB24)
+ if (p->video_ctx->pix_fmt == AV_PIX_FMT_RGB24)
{
GeglRectangle extent = {0,0,p->width,p->height};
gegl_buffer_set (output, &extent, 0, babl_format("R'G'B' u8"), p->lavc_frame->data[0], GEGL_AUTO_ROWSTRIDE);
@@ -746,7 +806,7 @@ process (GeglOperation *operation,
struct SwsContext *img_convert_ctx;
GeglRectangle extent = {0,0,p->width,p->height};
- img_convert_ctx = sws_getContext(p->width, p->height, p->video_stream->codec->pix_fmt,
+ img_convert_ctx = sws_getContext(p->width, p->height, p->video_ctx->pix_fmt,
p->width, p->height, AV_PIX_FMT_RGB24,
SWS_BICUBIC, NULL, NULL, NULL);
if (!p->rgb_frame)
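
The ff-load.c changes above migrate decoding from the removed avcodec_decode_video2()/avcodec_decode_audio4() calls to FFmpeg's send/receive API. For reference, a minimal sketch of that decode loop (illustrative only, not GEGL code; fmt, ctx and stream_idx are placeholder names, and error handling is abbreviated):

  /* Minimal send/receive decode loop: feed one compressed packet, then
   * drain every frame the decoder can return before reading the next packet. */
  #include <libavcodec/avcodec.h>
  #include <libavformat/avformat.h>

  static int
  decode_all (AVFormatContext *fmt, AVCodecContext *ctx, int stream_idx)
  {
    AVPacket *pkt   = av_packet_alloc ();
    AVFrame  *frame = av_frame_alloc ();
    int ret = 0;

    while (av_read_frame (fmt, pkt) >= 0)
      {
        if (pkt->stream_index == stream_idx)
          {
            ret = avcodec_send_packet (ctx, pkt);   /* one packet in */
            if (ret < 0)
              break;

            while ((ret = avcodec_receive_frame (ctx, frame)) == 0)
              {
                /* ... use frame ... */
                av_frame_unref (frame);
              }
            if (ret == AVERROR(EAGAIN))
              ret = 0;                              /* decoder wants the next packet */
            else if (ret < 0)
              break;                                /* real error (or AVERROR_EOF) */
          }
        av_packet_unref (pkt);
        if (ret < 0)
          break;
      }

    av_frame_free (&frame);
    av_packet_free (&pkt);
    return ret;
  }
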
diff --git a/operations/external/ff-save.c b/operations/external/ff-save.c
index 8dfb3ee89894d8f1d7c73496775c95520082467f..320ad029b59ac5066852cb3a1430c6016c084072 100644
--- a/operations/external/ff-save.c
+++ b/operations/external/ff-save.c
@@ -82,6 +82,8 @@ property_int (me_subpel_quality, _("me-subpel-quality"), 0)
#include "gegl-op.h"
+#include <libavutil/channel_layout.h>
+#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
@@ -106,6 +108,7 @@ typedef struct
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *video_st;
+ AVCodecContext *video_ctx;
AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
@@ -117,6 +120,7 @@ typedef struct
* using gggl directly,. without needing to link with the oxide library
*/
AVStream *audio_st;
+ AVCodecContext *audio_ctx;
uint32_t sample_rate;
uint32_t bits;
@@ -247,8 +251,6 @@ init (GeglProperties *o)
if (!inited)
{
- av_register_all ();
- avcodec_register_all ();
inited = 1;
}
@@ -282,7 +284,7 @@ static void write_audio_frame (GeglProperties *o,
static AVStream *
add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
- AVCodecContext *c;
+ AVCodecParameters *cp;
AVStream *st;
st = avformat_new_stream (oc, NULL);
@@ -292,12 +294,26 @@ add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
exit (1);
}
- c = st->codec;
- c->codec_id = codec_id;
- c->codec_type = AVMEDIA_TYPE_AUDIO;
+ cp = st->codecpar;
+ cp->codec_id = codec_id;
+ cp->codec_type = AVMEDIA_TYPE_AUDIO;
+ cp->bit_rate = o->audio_bit_rate * 1000;
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+ if (o->audio_sample_rate == -1)
+ {
+ if (o->audio)
+ {
+ if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
+ {
+ gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
+ }
+ o->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
+ }
+ }
+ cp->sample_rate = o->audio_sample_rate;
+
+ cp->channel_layout = AV_CH_LAYOUT_STEREO;
+ cp->channels = 2;
return st;
}
@@ -306,49 +322,44 @@ add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
static gboolean
open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
+ Priv *p = (Priv*)o->user_data;
AVCodecContext *c;
- AVCodec *codec;
+ AVCodecParameters *cp;
+ const AVCodec *codec;
int i;
- c = st->codec;
+ cp = st->codecpar;
/* find the audio encoder */
- codec = avcodec_find_encoder (c->codec_id);
+ codec = avcodec_find_encoder (cp->codec_id);
if (!codec)
{
+ p->audio_ctx = NULL;
fprintf (stderr, "codec not found\n");
return FALSE;
}
- c->bit_rate = o->audio_bit_rate * 1000;
- c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
-
- if (o->audio_sample_rate == -1)
- {
- if (o->audio)
+ p->audio_ctx = c = avcodec_alloc_context3 (codec);
+ cp->format = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
+ if (codec->supported_samplerates)
{
- if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
- {
- gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
- }
- o->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
+ for (i = 0; codec->supported_samplerates[i]; i++)
+ {
+ if (codec->supported_samplerates[i] == cp->sample_rate)
+ break;
+ }
+ if (!codec->supported_samplerates[i])
+ cp->sample_rate = codec->supported_samplerates[0];
}
- }
- c->sample_rate = o->audio_sample_rate;
- c->channel_layout = AV_CH_LAYOUT_STEREO;
- c->channels = 2;
-
-
- if (codec->supported_samplerates)
- {
- c->sample_rate = codec->supported_samplerates[0];
- for (i = 0; codec->supported_samplerates[i]; i++)
+ if (avcodec_parameters_to_context (c, cp) < 0)
{
- if (codec->supported_samplerates[i] == o->audio_sample_rate)
- c->sample_rate = o->audio_sample_rate;
+ fprintf (stderr, "cannot copy codec parameters\n");
+ return FALSE;
}
- }
- //st->time_base = (AVRational){1, c->sample_rate};
- st->time_base = (AVRational){1, o->audio_sample_rate};
+ if (p->oc->oformat->flags & AVFMT_GLOBALHEADER)
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+ st->time_base = (AVRational){1, c->sample_rate};
+ //st->time_base = (AVRational){1, o->audio_sample_rate};
c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; // ffmpeg AAC is not quite stable yet
@@ -358,7 +369,11 @@ open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
fprintf (stderr, "could not open codec\n");
return FALSE;
}
-
+ if (avcodec_parameters_from_context (cp, c) < 0)
+ {
+ fprintf (stderr, "cannot copy back the audio codec parameters\n");
+ return FALSE;
+ }
return TRUE;
}
@@ -393,19 +408,13 @@ static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st,
{
while (p->audio_pos - p->audio_read_pos > frame_size)
{
- AVCodecContext *c = st->codec;
+ AVCodecContext *c = p->audio_ctx;
long i;
int ret;
- int got_packet = 0;
static AVPacket pkt = { 0 }; /* XXX: static, should be stored in instance somehow */
AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
c->sample_rate, frame_size);
- if (pkt.size == 0)
- {
- av_init_packet (&pkt);
- }
-
av_frame_make_writable (frame);
switch (c->sample_fmt) {
case AV_SAMPLE_FMT_FLT:
@@ -469,19 +478,34 @@ static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st,
frame->pts = p->next_apts;
p->next_apts += frame_size;
- //ret = avcodec_send_frame (c, frame);
- ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);
-
- if (ret < 0) {
- fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
- }
- if (got_packet)
- {
- av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
- pkt.stream_index = st->index;
- av_interleaved_write_frame (oc, &pkt);
- av_packet_unref (&pkt);
- }
+ ret = avcodec_send_frame (c, frame);
+ if (ret < 0)
+ {
+ fprintf (stderr, "avcodec_send_frame failed: %s\n", av_err2str (ret));
+ }
+ while (ret == 0)
+ {
+ if (pkt.size == 0)
+ {
+ av_init_packet (&pkt);
+ }
+ ret = avcodec_receive_packet (c, &pkt);
+ if (ret == AVERROR(EAGAIN))
+ {
+ // no more packets; should send the next frame now
+ }
+ else if (ret < 0)
+ {
+ fprintf (stderr, "avcodec_receive_packet failed: %s\n", av_err2str (ret));
+ }
+ else
+ {
+ av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
+ pkt.stream_index = st->index;
+ av_interleaved_write_frame (oc, &pkt);
+ av_packet_unref (&pkt);
+ }
+ }
av_frame_free (&frame);
p->audio_read_pos += frame_size;
}
@@ -492,7 +516,7 @@ void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
Priv *p = (Priv*)o->user_data;
- AVCodecContext *c = st->codec;
+ AVCodecContext *c = p->audio_ctx;
int sample_count = 100000;
if (o->audio)
@@ -549,8 +573,7 @@ write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
void
close_audio (Priv * p, AVFormatContext * oc, AVStream * st)
{
- avcodec_close (st->codec);
-
+ avcodec_free_context (&p->audio_ctx);
}
/* add a video output stream */
@@ -559,7 +582,7 @@ add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
Priv *p = (Priv*)o->user_data;
- AVCodecContext *c;
+ AVCodecParameters *cp;
AVStream *st;
st = avformat_new_stream (oc, NULL);
@@ -569,78 +592,23 @@ add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
exit (1);
}
- c = st->codec;
- c->codec_id = codec_id;
- c->codec_type = AVMEDIA_TYPE_VIDEO;
+ cp = st->codecpar;
+ cp->codec_id = codec_id;
+ cp->codec_type = AVMEDIA_TYPE_VIDEO;
/* put sample propeters */
- c->bit_rate = o->video_bit_rate * 1000;
+ cp->bit_rate = o->video_bit_rate * 1000;
#ifdef USE_FINE_GRAINED_FFMPEG
- c->rc_min_rate = o->video_bit_rate_min * 1000;
- c->rc_max_rate = o->video_bit_rate_max * 1000;
+ cp->rc_min_rate = o->video_bit_rate_min * 1000;
+ cp->rc_max_rate = o->video_bit_rate_max * 1000;
if (o->video_bit_rate_tolerance >= 0)
- c->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
+ cp->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
#endif
/* resolution must be a multiple of two */
- c->width = p->width;
- c->height = p->height;
+ cp->width = p->width;
+ cp->height = p->height;
/* frames per second */
st->time_base =(AVRational){1000, o->frame_rate * 1000};
- c->time_base = st->time_base;
-
- c->pix_fmt = AV_PIX_FMT_YUV420P;
-
- if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
- {
- c->max_b_frames = 2;
- }
-
- if (c->codec_id == AV_CODEC_ID_H264)
- {
- c->qcompress = 0.6; // qcomp=0.6
- c->me_range = 16; // me_range=16
- c->gop_size = 250; // g=250
- c->max_b_frames = 3; // bf=3
- }
-
- if (o->video_bufsize)
- c->rc_buffer_size = o->video_bufsize * 1000;
-#if USE_FINE_GRAINED_FFMPEG
- if (o->global_quality)
- c->global_quality = o->global_quality;
- if (o->qcompress != 0.0)
- c->qcompress = o->qcompress;
- if (o->qblur != 0.0)
- c->qblur = o->qblur;
- if (o->max_qdiff != 0)
- c->max_qdiff = o->max_qdiff;
- if (o->me_subpel_quality != 0)
- c->me_subpel_quality = o->me_subpel_quality;
- if (o->i_quant_factor != 0.0)
- c->i_quant_factor = o->i_quant_factor;
- if (o->i_quant_offset != 0.0)
- c->i_quant_offset = o->i_quant_offset;
- if (o->max_b_frames)
- c->max_b_frames = o->max_b_frames;
- if (o->me_range)
- c->me_range = o->me_range;
- if (o->noise_reduction)
- c->noise_reduction = o->noise_reduction;
- if (o->scenechange_threshold)
- c->scenechange_threshold = o->scenechange_threshold;
- if (o->trellis)
- c->trellis = o->trellis;
- if (o->qmin)
- c->qmin = o->qmin;
- if (o->qmax)
- c->qmax = o->qmax;
- if (o->gop_size)
- c->gop_size = o->gop_size;
- if (o->keyint_min)
- c->keyint_min = o->keyint_min;
-#endif
-
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+ cp->format = AV_PIX_FMT_YUV420P;
return st;
}
@@ -656,14 +624,15 @@ alloc_picture (int pix_fmt, int width, int height)
picture = av_frame_alloc ();
if (!picture)
return NULL;
- size = avpicture_get_size (pix_fmt, width + 1, height + 1);
+ size = av_image_get_buffer_size(pix_fmt, width + 1, height + 1, 1);
picture_buf = malloc (size);
if (!picture_buf)
{
av_free (picture);
return NULL;
}
- avpicture_fill ((AVPicture *) picture, picture_buf, pix_fmt, width, height);
+ av_image_fill_arrays (picture->data, picture->linesize,
+ picture_buf, pix_fmt, width, height, 1);
return picture;
}
@@ -671,36 +640,99 @@ static gboolean
open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
Priv *p = (Priv*)o->user_data;
- AVCodec *codec;
+ const AVCodec *codec;
AVCodecContext *c;
+ AVCodecParameters *cp;
AVDictionary *codec_options = {0};
int ret;
- c = st->codec;
+ cp = st->codecpar;
/* find the video encoder */
- codec = avcodec_find_encoder (c->codec_id);
+ codec = avcodec_find_encoder (cp->codec_id);
if (!codec)
{
+ p->video_ctx = NULL;
fprintf (stderr, "codec not found\n");
return FALSE;
}
-
- if (codec->pix_fmts){
- int i = 0;
- c->pix_fmt = codec->pix_fmts[0];
- while (codec->pix_fmts[i] !=-1)
+ p->video_ctx = c = avcodec_alloc_context3 (codec);
+ if (codec->pix_fmts)
{
- if (codec->pix_fmts[i] == AV_PIX_FMT_RGB24)
- c->pix_fmt = AV_PIX_FMT_RGB24;
- i++;
+ int i = 0;
+ cp->format = codec->pix_fmts[0];
+ while (codec->pix_fmts[i] != -1)
+ {
+ if (codec->pix_fmts[i] == AV_PIX_FMT_RGB24)
+ {
+ cp->format = AV_PIX_FMT_RGB24;
+ break;
+ }
+ i++;
+ }
}
- }
+ if (avcodec_parameters_to_context (c, cp) < 0)
+ {
+ fprintf (stderr, "cannot copy codec parameters\n");
+ return FALSE;
+ }
+ c->time_base = st->time_base;
+ if (cp->codec_id == AV_CODEC_ID_MPEG2VIDEO)
+ {
+ c->max_b_frames = 2;
+ }
+ if (cp->codec_id == AV_CODEC_ID_H264)
+ {
+ c->qcompress = 0.6; // qcomp=0.6
+ c->me_range = 16; // me_range=16
+ c->gop_size = 250; // g=250
+ c->max_b_frames = 3; // bf=3
+ }
+ if (o->video_bufsize)
+ c->rc_buffer_size = o->video_bufsize * 1000;
+ if (p->oc->oformat->flags & AVFMT_GLOBALHEADER)
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
#if 0
if (o->video_preset[0])
av_dict_set (&codec_options, "preset", o->video_preset, 0);
#endif
+#if USE_FINE_GRAINED_FFMPEG
+ if (o->global_quality)
+ c->global_quality = o->global_quality;
+ if (o->qcompress != 0.0)
+ c->qcompress = o->qcompress;
+ if (o->qblur != 0.0)
+ c->qblur = o->qblur;
+ if (o->max_qdiff != 0)
+ c->max_qdiff = o->max_qdiff;
+ if (o->me_subpel_quality != 0)
+ c->me_subpel_quality = o->me_subpel_quality;
+ if (o->i_quant_factor != 0.0)
+ c->i_quant_factor = o->i_quant_factor;
+ if (o->i_quant_offset != 0.0)
+ c->i_quant_offset = o->i_quant_offset;
+ if (o->max_b_frames)
+ c->max_b_frames = o->max_b_frames;
+ if (o->me_range)
+ c->me_range = o->me_range;
+ if (o->noise_reduction)
+ c->noise_reduction = o->noise_reduction;
+ if (o->scenechange_threshold)
+ c->scenechange_threshold = o->scenechange_threshold;
+ if (o->trellis)
+ c->trellis = o->trellis;
+ if (o->qmin)
+ c->qmin = o->qmin;
+ if (o->qmax)
+ c->qmax = o->qmax;
+ if (o->gop_size)
+ c->gop_size = o->gop_size;
+ if (o->keyint_min)
+ c->keyint_min = o->keyint_min;
+#endif
+
/* open the codec */
if ((ret = avcodec_open2 (c, codec, &codec_options)) < 0)
{
@@ -739,6 +771,11 @@ open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
return FALSE;
}
}
+ if (avcodec_parameters_from_context (cp, c) < 0)
+ {
+ fprintf (stderr, "cannot copy back the video codec parameters\n");
+ return FALSE;
+ }
return TRUE;
}
@@ -746,7 +783,7 @@ open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
static void
close_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
- avcodec_close (st->codec);
+ avcodec_free_context (&p->video_ctx);
av_free (p->picture->data[0]);
av_free (p->picture);
if (p->tmp_picture)
@@ -778,7 +815,7 @@ write_video_frame (GeglProperties *o,
AVCodecContext *c;
AVFrame *picture_ptr;
- c = st->codec;
+ c = p->video_ctx;
if (c->pix_fmt != AV_PIX_FMT_RGB24)
{
@@ -838,15 +875,34 @@ write_video_frame (GeglProperties *o,
else
#endif
{
- /* encode the image */
- AVPacket pkt2;
- int got_packet = 0;
- av_init_packet(&pkt2);
- pkt2.data = p->video_outbuf;
- pkt2.size = p->video_outbuf_size;
-
- out_size = avcodec_encode_video2(c, &pkt2, picture_ptr, &got_packet);
-
+ // int got_packet = 0;
+ int key_frame = 0;
+ ret = avcodec_send_frame (c, picture_ptr);
+ while (ret == 0)
+ {
+ /* encode the image */
+ AVPacket pkt2;
+ av_init_packet(&pkt2);
+ // pkt2 will use its own buffer
+ // we may remove video_outbuf and video_outbuf_size too
+ //pkt2.data = p->video_outbuf;
+ //pkt2.size = p->video_outbuf_size;
+ ret = avcodec_receive_packet (c, &pkt2);
+ if (ret == AVERROR(EAGAIN))
+ {
+ // no more packets
+ ret = 0;
+ break;
+ }
+ else if (ret < 0)
+ {
+ break;
+ }
+ // out_size = 0;
+ // got_packet = 1;
+ key_frame = !!(pkt2.flags & AV_PKT_FLAG_KEY);
+ // coded_frame is removed by https://github.com/FFmpeg/FFmpeg/commit/11bc79089378a5ec00547d0f85bc152afdf30dfa
+ /*
if (!out_size && got_packet && c->coded_frame)
{
c->coded_frame->pts = pkt2.pts;
@@ -854,38 +910,32 @@ write_video_frame (GeglProperties *o,
if (c->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY)
c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
}
-
- if (pkt2.side_data_elems > 0)
- {
- int i;
- for (i = 0; i < pkt2.side_data_elems; i++)
- av_free(pkt2.side_data[i].data);
- av_freep(&pkt2.side_data);
- pkt2.side_data_elems = 0;
- }
-
- if (!out_size)
- out_size = pkt2.size;
-
- /* if zero size, it means the image was buffered */
- if (out_size != 0)
- {
- AVPacket pkt;
- av_init_packet (&pkt);
- if (c->coded_frame->key_frame)
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = st->index;
- pkt.data = p->video_outbuf;
- pkt.size = out_size;
- pkt.pts = picture_ptr->pts;
- pkt.dts = picture_ptr->pts;
- av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
- /* write the compressed frame in the media file */
- ret = av_write_frame (oc, &pkt);
- }
- else
- {
- ret = 0;
+ */
+ if (pkt2.side_data_elems > 0)
+ {
+ int i;
+ for (i = 0; i < pkt2.side_data_elems; i++)
+ av_free(pkt2.side_data[i].data);
+ av_freep(&pkt2.side_data);
+ pkt2.side_data_elems = 0;
+ }
+ out_size = pkt2.size;
+ /* if zero size, it means the image was buffered */
+ if (out_size != 0)
+ {
+ AVPacket pkt;
+ av_init_packet (&pkt);
+ if (key_frame)
+ pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt.stream_index = st->index;
+ pkt.data = pkt2.data;
+ pkt.size = out_size;
+ pkt.pts = picture_ptr->pts;
+ pkt.dts = picture_ptr->pts;
+ av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
+ /* write the compressed frame in the media file */
+ ret = av_write_frame (oc, &pkt);
+ }
}
}
if (ret != 0)
@@ -901,17 +951,18 @@ tfile (GeglProperties *o)
{
Priv *p = (Priv*)o->user_data;
+ const AVOutputFormat *shared_fmt;
if (strcmp (o->container_format, "auto"))
- p->fmt = av_guess_format (o->container_format, o->path, NULL);
+ shared_fmt = av_guess_format (o->container_format, o->path, NULL);
else
- p->fmt = av_guess_format (NULL, o->path, NULL);
+ shared_fmt = av_guess_format (NULL, o->path, NULL);
- if (!p->fmt)
+ if (!shared_fmt)
{
fprintf (stderr,
"ff_save couldn't deduce outputformat from file extension: using MPEG.\n%s",
"");
- p->fmt = av_guess_format ("mpeg", NULL, NULL);
+ shared_fmt = av_guess_format ("mpeg", NULL, NULL);
}
p->oc = avformat_alloc_context ();
if (!p->oc)
@@ -920,23 +971,25 @@ tfile (GeglProperties *o)
return -1;
}
- p->oc->oformat = p->fmt;
-
- snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);
+ // The "avio_open" below fills "url" field instead of the "filename"
+ // snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);
p->video_st = NULL;
p->audio_st = NULL;
+ enum AVCodecID audio_codec = shared_fmt->audio_codec;
+ enum AVCodecID video_codec = shared_fmt->video_codec;
if (strcmp (o->video_codec, "auto"))
{
- AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
- p->fmt->video_codec = AV_CODEC_ID_NONE;
+ const AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
+ video_codec = AV_CODEC_ID_NONE;
if (codec)
- p->fmt->video_codec = codec->id;
+ video_codec = codec->id;
else
{
fprintf (stderr, "didn't find video encoder \"%s\"\navailable codecs: ", o->video_codec);
- while ((codec = av_codec_next (codec)))
+ void *opaque = NULL;
+ while ((codec = av_codec_iterate (&opaque)))
if (av_codec_is_encoder (codec) &&
avcodec_get_type (codec->id) == AVMEDIA_TYPE_VIDEO)
fprintf (stderr, "%s ", codec->name);
@@ -945,31 +998,36 @@ tfile (GeglProperties *o)
}
if (strcmp (o->audio_codec, "auto"))
{
- AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
- p->fmt->audio_codec = AV_CODEC_ID_NONE;
+ const AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
+ audio_codec = AV_CODEC_ID_NONE;
if (codec)
- p->fmt->audio_codec = codec->id;
+ audio_codec = codec->id;
else
{
fprintf (stderr, "didn't find audio encoder \"%s\"\navailable codecs: ", o->audio_codec);
- while ((codec = av_codec_next (codec)))
+ void *opaque = NULL;
+ while ((codec = av_codec_iterate (&opaque)))
if (av_codec_is_encoder (codec) &&
avcodec_get_type (codec->id) == AVMEDIA_TYPE_AUDIO)
fprintf (stderr, "%s ", codec->name);
fprintf (stderr, "\n");
}
}
+ p->fmt = av_malloc (sizeof(AVOutputFormat));
+ *(p->fmt) = *shared_fmt;
+ p->fmt->video_codec = video_codec;
+ p->fmt->audio_codec = audio_codec;
+ p->oc->oformat = p->fmt;
- if (p->fmt->video_codec != AV_CODEC_ID_NONE)
+ if (video_codec != AV_CODEC_ID_NONE)
{
- p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
+ p->video_st = add_video_stream (o, p->oc, video_codec);
}
- if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
+ if (audio_codec != AV_CODEC_ID_NONE)
{
- p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
+ p->audio_st = add_audio_stream (o, p->oc, audio_codec);
}
-
if (p->video_st && ! open_video (o, p->oc, p->video_st))
return -1;
@@ -997,25 +1055,35 @@ static void flush_audio (GeglProperties *o)
{
Priv *p = (Priv*)o->user_data;
AVPacket pkt = { 0 };
- int ret;
+ int ret = 0;
- int got_packet = 0;
if (!p->audio_st)
return;
- got_packet = 0;
- av_init_packet (&pkt);
- ret = avcodec_encode_audio2 (p->audio_st->codec, &pkt, NULL, &got_packet);
+ ret = avcodec_send_frame (p->audio_ctx, NULL);
if (ret < 0)
- {
- fprintf (stderr, "audio enc trouble\n");
- }
- if (got_packet)
{
- pkt.stream_index = p->audio_st->index;
- av_packet_rescale_ts (&pkt, p->audio_st->codec->time_base, p->audio_st->time_base);
- av_interleaved_write_frame (p->oc, &pkt);
- av_packet_unref (&pkt);
+ fprintf (stderr, "avcodec_send_frame failed while entering to draining mode: %s\n", av_err2str (ret));
+ }
+ av_init_packet (&pkt);
+ while (ret == 0)
+ {
+ ret = avcodec_receive_packet (p->audio_ctx, &pkt);
+ if (ret == AVERROR_EOF)
+ {
+ // no more packets
+ }
+ else if (ret < 0)
+ {
+ fprintf (stderr, "avcodec_receive_packet failed: %s\n", av_err2str (ret));
+ }
+ else
+ {
+ pkt.stream_index = p->audio_st->index;
+ av_packet_rescale_ts (&pkt, p->audio_ctx->time_base, p->audio_st->time_base);
+ av_interleaved_write_frame (p->oc, &pkt);
+ av_packet_unref (&pkt);
+ }
}
}
@@ -1062,27 +1130,35 @@ process (GeglOperation *operation,
static void flush_video (GeglProperties *o)
{
Priv *p = (Priv*)o->user_data;
- int got_packet = 0;
long ts = p->frame_count;
- do {
- AVPacket pkt = { 0 };
- int ret;
- got_packet = 0;
- av_init_packet (&pkt);
- ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
- if (ret < 0)
- return;
-
- if (got_packet)
- {
- pkt.stream_index = p->video_st->index;
- pkt.pts = ts;
- pkt.dts = ts++;
- av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
- av_interleaved_write_frame (p->oc, &pkt);
- av_packet_unref (&pkt);
- }
- } while (got_packet);
+ AVPacket pkt = { 0 };
+ int ret = 0;
+ ret = avcodec_send_frame (p->video_ctx, NULL);
+ if (ret < 0)
+ {
+ fprintf (stderr, "avcodec_send_frame failed while entering to draining mode: %s\n", av_err2str (ret));
+ }
+ av_init_packet (&pkt);
+ while (ret == 0)
+ {
+ ret = avcodec_receive_packet (p->video_ctx, &pkt);
+ if (ret == AVERROR_EOF)
+ {
+ // no more packets
+ }
+      else if (ret < 0)
+        {
+          fprintf (stderr, "avcodec_receive_packet failed: %s\n", av_err2str (ret));
+        }
+ else
+ {
+ pkt.stream_index = p->video_st->index;
+ pkt.pts = ts;
+ pkt.dts = ts++;
+ av_packet_rescale_ts (&pkt, p->video_ctx->time_base, p->video_st->time_base);
+ av_interleaved_write_frame (p->oc, &pkt);
+ av_packet_unref (&pkt);
+ }
+ }
}
static void
@@ -1107,6 +1183,7 @@ finalize (GObject *object)
}
avio_closep (&p->oc->pb);
+ av_freep (&p->fmt);
avformat_free_context (p->oc);
g_clear_pointer (&o->user_data, g_free);
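
On the encoding side, ff-save.c replaces avcodec_encode_video2()/avcodec_encode_audio2() with avcodec_send_frame()/avcodec_receive_packet(), and flush_audio()/flush_video() now drain the encoders by sending a NULL frame. A minimal sketch of that pattern (illustrative only, not GEGL code; oc, ctx and st are placeholder names):

  /* Minimal send/receive encode step: pass a frame (or NULL to start
   * draining), then mux every packet the encoder hands back. */
  #include <libavcodec/avcodec.h>
  #include <libavformat/avformat.h>

  static int
  encode_and_write (AVFormatContext *oc, AVCodecContext *ctx,
                    AVStream *st, AVFrame *frame)
  {
    AVPacket *pkt = av_packet_alloc ();
    int ret = avcodec_send_frame (ctx, frame);   /* frame == NULL enters draining mode */

    while (ret >= 0)
      {
        ret = avcodec_receive_packet (ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
          {
            ret = 0;                             /* nothing (more) to write yet */
            break;
          }
        else if (ret < 0)
          break;                                 /* real encoding error */

        /* rescale from the encoder time base to the stream time base, then mux */
        av_packet_rescale_ts (pkt, ctx->time_base, st->time_base);
        pkt->stream_index = st->index;
        ret = av_interleaved_write_frame (oc, pkt);
        av_packet_unref (pkt);
      }

    av_packet_free (&pkt);
    return ret;
  }
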