am7xxx-play: add a comment to explain why a run-time warning can be ignored
diff --git a/examples/am7xxx-play.c b/examples/am7xxx-play.c
index aaf941b..5dab320 100644
--- a/examples/am7xxx-play.c
+++ b/examples/am7xxx-play.c
 
 #include <libavdevice/avdevice.h>
 #include <libavformat/avformat.h>
+#include <libavutil/imgutils.h>
 #include <libswscale/swscale.h>
 
 #include <am7xxx.h>
 
-/* On some systems ENOTSUP is not defined, fallback to its value on
- * linux which is equal to EOPNOTSUPP which is 95
- */
-#ifndef ENOTSUP
-#define ENOTSUP 95
-#endif
-
 static unsigned int run = 1;
 
 struct video_input_ctx {
@@ -57,10 +51,10 @@ static int video_input_init(struct video_input_ctx *input_ctx,
 {
        AVInputFormat *input_format = NULL;
        AVFormatContext *input_format_ctx;
+       AVCodecParameters *input_codec_params;
        AVCodecContext *input_codec_ctx;
        AVCodec *input_codec;
        int video_index;
-       unsigned int i;
        int ret;
 
        avdevice_register_all();
@@ -105,40 +99,32 @@ static int video_input_init(struct video_input_ctx *input_ctx,
        av_dump_format(input_format_ctx, 0, input_path, 0);
 
        /* look for the first video_stream */
-       video_index = -1;
-       for (i = 0; i < input_format_ctx->nb_streams; i++)
-               if (input_format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-                       video_index = i;
-                       break;
-               }
-       if (video_index == -1) {
+       video_index = av_find_best_stream(input_format_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &input_codec, 0);
+       if (video_index < 0) {
                fprintf(stderr, "cannot find any video streams\n");
-               ret = -ENOTSUP;
+               ret = -EINVAL;
                goto cleanup;
        }
 
-       /* get a pointer to the codec context for the video stream */
-       input_codec_ctx = input_format_ctx->streams[video_index]->codec;
+       input_codec_ctx = avcodec_alloc_context3(input_codec);
        if (input_codec_ctx == NULL) {
-               fprintf(stderr, "input codec context is not valid\n");
-               ret = -ENOTSUP;
+               fprintf(stderr, "failed to allocate the input codec context\n");
+               ret = -ENOMEM;
                goto cleanup;
        }
 
-       /* find the decoder for the video stream */
-       input_codec = avcodec_find_decoder(input_codec_ctx->codec_id);
-       if (input_codec == NULL) {
-               fprintf(stderr, "input_codec is NULL!\n");
-               ret = -ENOTSUP;
-               goto cleanup;
+       input_codec_params = input_format_ctx->streams[video_index]->codecpar;
+       ret = avcodec_parameters_to_context(input_codec_ctx, input_codec_params);
+       if (ret < 0) {
+               fprintf(stderr, "cannot copy parameters to input codec context\n");
+               goto cleanup_ctx;
        }
 
        /* open the decoder */
        ret = avcodec_open2(input_codec_ctx, input_codec, NULL);
        if (ret < 0) {
                fprintf(stderr, "cannot open input codec\n");
-               ret = -ENOTSUP;
-               goto cleanup;
+               goto cleanup_ctx;
        }
 
        input_ctx->format_ctx = input_format_ctx;
@@ -148,6 +134,8 @@ static int video_input_init(struct video_input_ctx *input_ctx,
        ret = 0;
        goto out;
 
+cleanup_ctx:
+       avcodec_free_context(&input_codec_ctx);
 cleanup:
        avformat_close_input(&input_format_ctx);
 out:
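
The decoder setup adopted by this hunk, together with the matching teardown in the cleanup hunks further down, follows the usual post-3.1 libavcodec pattern. A condensed sketch of that pattern, with illustrative names (fmt_ctx, dec, dec_ctx and idx are placeholders, not the identifiers used in am7xxx-play.c) and with error handling omitted:

        AVCodec *dec = NULL;
        AVCodecContext *dec_ctx;
        int idx;

        /* pick the best video stream and its decoder in one call */
        idx = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);

        /* the application now owns its own codec context... */
        dec_ctx = avcodec_alloc_context3(dec);

        /* ...and initializes it from the demuxer-provided stream parameters */
        avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[idx]->codecpar);
        avcodec_open2(dec_ctx, dec, NULL);

        /* ... decoding ... */

        avcodec_free_context(&dec_ctx);

Because the context is no longer borrowed from AVStream.codec, it also has to be freed explicitly, which is what the new cleanup_ctx label and the avcodec_free_context() calls in the cleanup paths take care of.
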
@@ -189,7 +177,7 @@ static int video_output_init(struct video_output_ctx *output_ctx,
                goto out;
        }
 
-       /* Calculate the new output dimension so the original picture is shown
+       /* Calculate the new output dimensions so the original frame is shown
         * in its entirety */
        ret = am7xxx_calc_scaled_image_dimensions(dev,
                                                  upscale,
@@ -223,6 +211,7 @@ static int video_output_init(struct video_output_ctx *output_ctx,
                goto out;
        }
 
+       /* YUVJ420P is deprecated in swscaler, but mjpeg still relies on it, so the run-time warning about it can safely be ignored. */
        output_codec_ctx->pix_fmt    = AV_PIX_FMT_YUVJ420P;
        output_codec_ctx->codec_id   = AV_CODEC_ID_MJPEG;
        output_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
@@ -235,16 +224,16 @@ static int video_output_init(struct video_output_ctx *output_ctx,
         * in particular they won't be 0, this is needed because they are used
         * as divisor somewhere in the encoding process */
        output_codec_ctx->qmin       = output_codec_ctx->qmax = ((100 - (quality - 1)) * FF_QUALITY_SCALE) / 100;
-       output_codec_ctx->mb_lmin    = output_codec_ctx->lmin = output_codec_ctx->qmin * FF_QP2LAMBDA;
-       output_codec_ctx->mb_lmax    = output_codec_ctx->lmax = output_codec_ctx->qmax * FF_QP2LAMBDA;
-       output_codec_ctx->flags      |= CODEC_FLAG_QSCALE;
+       output_codec_ctx->mb_lmin    = output_codec_ctx->qmin * FF_QP2LAMBDA;
+       output_codec_ctx->mb_lmax    = output_codec_ctx->qmax * FF_QP2LAMBDA;
+       output_codec_ctx->flags      |= AV_CODEC_FLAG_QSCALE;
        output_codec_ctx->global_quality = output_codec_ctx->qmin * FF_QP2LAMBDA;
 
        /* find the encoder */
        output_codec = avcodec_find_encoder(output_codec_ctx->codec_id);
        if (output_codec == NULL) {
                fprintf(stderr, "cannot find output codec!\n");
-               ret = -ENOTSUP;
+               ret = -EINVAL;
                goto cleanup;
        }
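
To make the quantizer mapping above concrete, assuming the stock FFmpeg definitions FF_QUALITY_SCALE = FF_LAMBDA_SCALE = 128 and FF_QP2LAMBDA = 118, a hypothetical quality of 95 gives:

        qmin = qmax       = ((100 - 94) * 128) / 100 = 7
        mb_lmin = mb_lmax = 7 * 118 = 826
        global_quality    = 7 * 118 = 826

Even quality = 100 still yields qmin = (1 * 128) / 100 = 1, so the values never reach 0, which is exactly what the comment about them being used as a divisor is guarding against.
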
 
@@ -262,13 +251,73 @@ static int video_output_init(struct video_output_ctx *output_ctx,
        goto out;
 
 cleanup:
-       avcodec_close(output_codec_ctx);
-       av_free(output_codec_ctx);
+       avcodec_free_context(&output_codec_ctx);
 out:
        return ret;
 }
 
 
+/*
+ * Wrap the new avcodec API from FFmpeg 3.1 to minimize the changes in the
+ * user code.
+ *
+ * If the use of the wrappers were to be made conditional, a check like the
+ * following could be used:
+ *
+ *     #if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101))
+ *
+ * As derived from the APIchanges document:
+ * https://github.com/FFmpeg/FFmpeg/blob/master/doc/APIchanges
+ *
+ * The wrapper implementation has been taken from:
+ * https://blogs.gentoo.org/lu_zero/2016/03/29/new-avcodec-api/
+ */
+static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
+{
+       int ret;
+
+       *got_frame = 0;
+
+       if (pkt) {
+               ret = avcodec_send_packet(avctx, pkt);
+               /*
+                * In particular, we don't expect AVERROR(EAGAIN), because we
+                * read all decoded frames with avcodec_receive_frame() until
+                * done.
+                */
+               if (ret < 0)
+                       return ret == AVERROR_EOF ? 0 : ret;
+       }
+
+       ret = avcodec_receive_frame(avctx, frame);
+       if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+               return ret;
+       if (ret >= 0)
+               *got_frame = 1;
+
+       return 0;
+}
+
+static int encode(AVCodecContext *avctx, AVPacket *pkt, int *got_packet, AVFrame *frame)
+{
+       int ret;
+
+       *got_packet = 0;
+
+       ret = avcodec_send_frame(avctx, frame);
+       if (ret < 0)
+               return ret;
+
+       ret = avcodec_receive_packet(avctx, pkt);
+       if (!ret)
+               *got_packet = 1;
+       if (ret == AVERROR(EAGAIN))
+               return 0;
+
+       return ret;
+}
+
+
 static int am7xxx_play(const char *input_format_string,
                       AVDictionary **input_options,
                       const char *input_path,
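
The send/receive API wrapped above decouples input from output: avcodec_send_packet() queues a packet which may produce zero or more frames, and sending NULL enters draining mode. The decode()/encode() wrappers deliberately keep the old one-in/one-out behaviour of avcodec_decode_video2()/avcodec_encode_video2(), so no drain is performed at end of stream. A full drain, which this patch does not do, would look roughly like the sketch below, where dec_ctx is an already opened decoder context and frame an allocated AVFrame:

        avcodec_send_packet(dec_ctx, NULL);     /* signal end of stream */
        while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
                /* ... display or encode the buffered frame ... */
                av_frame_unref(frame);
        }

For live inputs that deliver one frame per packet this makes little practical difference; for file playback with a codec that buffers frames for reordering, the last few frames would simply not be shown.
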
@@ -281,16 +330,16 @@ static int am7xxx_play(const char *input_format_string,
 {
        struct video_input_ctx input_ctx;
        struct video_output_ctx output_ctx;
-       AVFrame *picture_raw;
-       AVFrame *picture_scaled;
+       AVFrame *frame_raw;
+       AVFrame *frame_scaled;
        int out_buf_size;
        uint8_t *out_buf;
-       int out_picture_size;
-       uint8_t *out_picture;
+       int out_frame_size;
+       uint8_t *out_frame;
        struct SwsContext *sw_scale_ctx;
        AVPacket in_packet;
        AVPacket out_packet;
-       int got_picture;
+       int got_frame;
        int got_packet;
        int ret;
 
@@ -307,41 +356,44 @@ static int am7xxx_play(const char *input_format_string,
        }
 
        /* allocate an input frame */
-       picture_raw = av_frame_alloc();
-       if (picture_raw == NULL) {
-               fprintf(stderr, "cannot allocate the raw picture frame!\n");
+       frame_raw = av_frame_alloc();
+       if (frame_raw == NULL) {
+               fprintf(stderr, "cannot allocate the raw frame!\n");
                ret = -ENOMEM;
                goto cleanup_output;
        }
 
        /* allocate output frame */
-       picture_scaled = av_frame_alloc();
-       if (picture_scaled == NULL) {
-               fprintf(stderr, "cannot allocate the scaled picture!\n");
+       frame_scaled = av_frame_alloc();
+       if (frame_scaled == NULL) {
+               fprintf(stderr, "cannot allocate the scaled frame!\n");
                ret = -ENOMEM;
-               goto cleanup_picture_raw;
+               goto cleanup_frame_raw;
        }
-       picture_scaled->format = (output_ctx.codec_ctx)->pix_fmt;
-       picture_scaled->width = (output_ctx.codec_ctx)->width;
-       picture_scaled->height = (output_ctx.codec_ctx)->height;
+       frame_scaled->format = (output_ctx.codec_ctx)->pix_fmt;
+       frame_scaled->width = (output_ctx.codec_ctx)->width;
+       frame_scaled->height = (output_ctx.codec_ctx)->height;
 
        /* calculate the bytes needed for the output image and create buffer for the output image */
-       out_buf_size = avpicture_get_size((output_ctx.codec_ctx)->pix_fmt,
-                                         (output_ctx.codec_ctx)->width,
-                                         (output_ctx.codec_ctx)->height);
+       out_buf_size = av_image_get_buffer_size((output_ctx.codec_ctx)->pix_fmt,
+                                               (output_ctx.codec_ctx)->width,
+                                               (output_ctx.codec_ctx)->height,
+                                               1);
        out_buf = av_malloc(out_buf_size * sizeof(uint8_t));
        if (out_buf == NULL) {
                fprintf(stderr, "cannot allocate output data buffer!\n");
                ret = -ENOMEM;
-               goto cleanup_picture_scaled;
+               goto cleanup_frame_scaled;
        }
 
-       /* assign appropriate parts of buffer to image planes in picture_scaled */
-       avpicture_fill((AVPicture *)picture_scaled,
-                      out_buf,
-                      (output_ctx.codec_ctx)->pix_fmt,
-                      (output_ctx.codec_ctx)->width,
-                      (output_ctx.codec_ctx)->height);
+       /* assign appropriate parts of buffer to image planes in frame_scaled */
+       av_image_fill_arrays(frame_scaled->data,
+                            frame_scaled->linesize,
+                            out_buf,
+                            (output_ctx.codec_ctx)->pix_fmt,
+                            (output_ctx.codec_ctx)->width,
+                            (output_ctx.codec_ctx)->height,
+                            1);
 
        sw_scale_ctx = sws_getCachedContext(NULL,
                                            (input_ctx.codec_ctx)->width,
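
The trailing 1 in the av_image_get_buffer_size()/av_image_fill_arrays() calls above is the requested buffer alignment. As an aside on the design choice: with format, width and height already set on frame_scaled, FFmpeg could also allocate the planes itself via av_frame_get_buffer(), roughly as in the hypothetical sketch below (not what this patch does); keeping the explicit out_buf has the advantage that the raw-output path later hands exactly that contiguous buffer, with out_buf_size, to am7xxx_send_image_async():

        /* hypothetical alternative, not used by am7xxx-play */
        frame_scaled->format = (output_ctx.codec_ctx)->pix_fmt;
        frame_scaled->width  = (output_ctx.codec_ctx)->width;
        frame_scaled->height = (output_ctx.codec_ctx)->height;
        ret = av_frame_get_buffer(frame_scaled, 0);     /* 0 lets FFmpeg pick the alignment */
        if (ret < 0) {
                /* allocation failed */
        }
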
@@ -378,8 +430,8 @@ static int am7xxx_play(const char *input_format_string,
                }
 
                /* decode */
-               got_picture = 0;
-               ret = avcodec_decode_video2(input_ctx.codec_ctx, picture_raw, &got_picture, &in_packet);
+               got_frame = 0;
+               ret = decode(input_ctx.codec_ctx, frame_raw, &got_frame, &in_packet);
                if (ret < 0) {
                        fprintf(stderr, "cannot decode video\n");
                        run = 0;
@@ -387,37 +439,41 @@ static int am7xxx_play(const char *input_format_string,
                }
 
                /* if we got the complete frame */
-               if (got_picture) {
-                       /* convert it to YUV */
+               if (got_frame) {
+                       /*
+                        * Rescaling the frame also changes its pixel format
+                        * to the raw format supported by the projector, if
+                        * that was set in video_output_init()
+                        */
                        sws_scale(sw_scale_ctx,
-                                 (const uint8_t * const *)picture_raw->data,
-                                 picture_raw->linesize,
+                                 (const uint8_t * const *)frame_raw->data,
+                                 frame_raw->linesize,
                                  0,
                                  (input_ctx.codec_ctx)->height,
-                                 picture_scaled->data,
-                                 picture_scaled->linesize);
+                                 frame_scaled->data,
+                                 frame_scaled->linesize);
 
                        if (output_ctx.raw_output) {
-                               out_picture = out_buf;
-                               out_picture_size = out_buf_size;
+                               out_frame = out_buf;
+                               out_frame_size = out_buf_size;
                        } else {
-                               picture_scaled->quality = (output_ctx.codec_ctx)->global_quality;
+                               frame_scaled->quality = (output_ctx.codec_ctx)->global_quality;
                                av_init_packet(&out_packet);
                                out_packet.data = NULL;
                                out_packet.size = 0;
                                got_packet = 0;
-                               ret = avcodec_encode_video2(output_ctx.codec_ctx,
-                                                           &out_packet,
-                                                           picture_scaled,
-                                                           &got_packet);
+                               ret = encode(output_ctx.codec_ctx,
+                                            &out_packet,
+                                            &got_packet,
+                                            frame_scaled);
                                if (ret < 0 || !got_packet) {
                                        fprintf(stderr, "cannot encode video\n");
                                        run = 0;
                                        goto end_while;
                                }
 
-                               out_picture = out_packet.data;
-                               out_picture_size = out_packet.size;
+                               out_frame = out_packet.data;
+                               out_frame_size = out_packet.size;
                        }
 
 #ifdef DEBUG
@@ -429,7 +485,7 @@ static int am7xxx_play(const char *input_format_string,
                                else
                                        snprintf(filename, NAME_MAX, "out.raw");
                                file = fopen(filename, "wb");
-                               fwrite(out_picture, 1, out_picture_size, file);
+                               fwrite(out_frame, 1, out_frame_size, file);
                                fclose(file);
                        }
 #else
@@ -440,8 +496,8 @@ static int am7xxx_play(const char *input_format_string,
                                                      image_format,
                                                      (output_ctx.codec_ctx)->width,
                                                      (output_ctx.codec_ctx)->height,
-                                                     out_picture,
-                                                     out_picture_size);
+                                                     out_frame,
+                                                     out_frame_size);
                        if (ret < 0) {
                                perror("am7xxx_send_image_async");
                                run = 0;
@@ -450,27 +506,28 @@ static int am7xxx_play(const char *input_format_string,
                }
 end_while:
                if (!output_ctx.raw_output && got_packet)
-                       av_free_packet(&out_packet);
-               av_free_packet(&in_packet);
+                       av_packet_unref(&out_packet);
+               av_packet_unref(&in_packet);
        }
 
        sws_freeContext(sw_scale_ctx);
 cleanup_out_buf:
        av_free(out_buf);
-cleanup_picture_scaled:
-       av_frame_free(&picture_scaled);
-cleanup_picture_raw:
-       av_frame_free(&picture_raw);
+cleanup_frame_scaled:
+       av_frame_free(&frame_scaled);
+cleanup_frame_raw:
+       av_frame_free(&frame_raw);
 
 cleanup_output:
-       /* av_free is needed as well,
-        * see http://libav.org/doxygen/master/avcodec_8h.html#a5d7440cd7ea195bd0b14f21a00ef36dd
+       /* Freeing the codec context is needed as well,
+        * see https://libav.org/documentation/doxygen/master/group__lavc__core.html#gaf4daa92361efb3523ef5afeb0b54077f
         */
        avcodec_close(output_ctx.codec_ctx);
-       av_free(output_ctx.codec_ctx);
+       avcodec_free_context(&(output_ctx.codec_ctx));
 
 cleanup_input:
        avcodec_close(input_ctx.codec_ctx);
+       avcodec_free_context(&(input_ctx.codec_ctx));
        avformat_close_input(&(input_ctx.format_ctx));
 
 out:
@@ -548,7 +605,7 @@ static char *get_x_screen_size(const char *input_path)
 static char *get_x_screen_size(const char *input_path)
 {
        (void) input_path;
-       fprintf(stderr, "%s: fallback implementation\n", __func__);
+       fprintf(stderr, "%s: fallback implementation, assuming a vga screen\n", __func__);
        return strdup("vga");
 }
 #endif
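
As the comment added at the top of the wrappers suggests, the decode()/encode() helpers could be made conditional on the libavcodec version instead of being used unconditionally. A sketch of what such a guard could look like, with the pre-3.1 fallbacks simply forwarding to the old one-shot functions (an assumption about how one might do it, not part of this patch):

        #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101)
        /* the send/receive based decode()/encode() wrappers from this patch */
        #else
        static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
        {
                return avcodec_decode_video2(avctx, frame, got_frame, pkt);
        }

        static int encode(AVCodecContext *avctx, AVPacket *pkt, int *got_packet, AVFrame *frame)
        {
                return avcodec_encode_video2(avctx, pkt, frame, got_packet);
        }
        #endif
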