/*
* am7xxx-play - play stuff on an am7xxx device (e.g. Acer C110, PicoPix 1020)
*
- * Copyright (C) 2012 Antonio Ospite <ospite@studenti.unina.it>
+ * Copyright (C) 2012-2014 Antonio Ospite <ao2@ao2.it>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/**
+ * @example examples/am7xxx-play.c
+ * am7xxx-play uses libavdevice, libavformat, libavcodec and libswscale to
+ * decode the input, encode it to jpeg and display it with libam7xxx.
+ */
+
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
+#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <am7xxx.h>
+/* On some systems ENOTSUP is not defined; fall back to its value on
+ * Linux, where it is equal to EOPNOTSUPP, i.e. 95
+ */
+#ifndef ENOTSUP
+#define ENOTSUP 95
+#endif
+
static unsigned int run = 1;
struct video_input_ctx {
/* get information on the input stream (e.g. format, bitrate, framerate) */
ret = avformat_find_stream_info(input_format_ctx, NULL);
if (ret < 0) {
- fprintf(stderr, "cannot get information on the stream");
+ fprintf(stderr, "cannot get information on the stream\n");
goto cleanup;
}
/* find the decoder for the video stream */
input_codec = avcodec_find_decoder(input_codec_ctx->codec_id);
if (input_codec == NULL) {
- fprintf(stderr, "input_codec is NULL!");
+ fprintf(stderr, "input_codec is NULL!\n");
ret = -ENOTSUP;
goto cleanup;
}
int ret;
if (input_ctx == NULL) {
- fprintf(stderr, "input_ctx must not be NULL!");
+ fprintf(stderr, "input_ctx must not be NULL!\n");
ret = -EINVAL;
goto out;
}
/* create the encoder context */
output_codec_ctx = avcodec_alloc_context3(NULL);
if (output_codec_ctx == NULL) {
- fprintf(stderr, "cannot allocate output codec context!");
+ fprintf(stderr, "cannot allocate output codec context!\n");
ret = -ENOMEM;
goto out;
}
output_codec_ctx->bit_rate = (input_ctx->codec_ctx)->bit_rate;
output_codec_ctx->width = new_output_width;
output_codec_ctx->height = new_output_height;
- output_codec_ctx->time_base.num = (input_ctx->codec_ctx)->time_base.num;
- output_codec_ctx->time_base.den = (input_ctx->codec_ctx)->time_base.den;
+ output_codec_ctx->time_base.num =
+ (input_ctx->format_ctx)->streams[input_ctx->video_stream_index]->time_base.num;
+ output_codec_ctx->time_base.den =
+ (input_ctx->format_ctx)->streams[input_ctx->video_stream_index]->time_base.den;
/* When the raw format is requested we don't actually need to setup
* and open a decoder
*/
if (image_format == AM7XXX_IMAGE_FORMAT_NV12) {
fprintf(stdout, "using raw output format\n");
- output_codec_ctx->pix_fmt = PIX_FMT_NV12;
+ output_codec_ctx->pix_fmt = AV_PIX_FMT_NV12;
output_ctx->codec_ctx = output_codec_ctx;
output_ctx->raw_output = 1;
ret = 0;
goto out;
}
- output_codec_ctx->pix_fmt = PIX_FMT_YUVJ420P;
- output_codec_ctx->codec_id = CODEC_ID_MJPEG;
+ output_codec_ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
+ output_codec_ctx->codec_id = AV_CODEC_ID_MJPEG;
output_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
/* Set quality and other VBR settings */
* in particular they won't be 0, this is needed because they are used
* as divisor somewhere in the encoding process */
output_codec_ctx->qmin = output_codec_ctx->qmax = ((100 - (quality - 1)) * FF_QUALITY_SCALE) / 100;
- output_codec_ctx->mb_lmin = output_codec_ctx->lmin = output_codec_ctx->qmin * FF_QP2LAMBDA;
- output_codec_ctx->mb_lmax = output_codec_ctx->lmax = output_codec_ctx->qmax * FF_QP2LAMBDA;
+ output_codec_ctx->mb_lmin = output_codec_ctx->qmin * FF_QP2LAMBDA;
+ output_codec_ctx->mb_lmax = output_codec_ctx->qmax * FF_QP2LAMBDA;
output_codec_ctx->flags |= CODEC_FLAG_QSCALE;
output_codec_ctx->global_quality = output_codec_ctx->qmin * FF_QP2LAMBDA;
unsigned int upscale,
unsigned int quality,
am7xxx_image_format image_format,
- am7xxx_device *dev)
+ am7xxx_device *dev,
+ int dump_frame)
{
struct video_input_ctx input_ctx;
struct video_output_ctx output_ctx;
int out_buf_size;
uint8_t *out_buf;
int out_picture_size;
+ uint8_t *out_picture;
struct SwsContext *sw_scale_ctx;
- AVPacket packet;
+ AVPacket in_packet;
+ AVPacket out_packet;
int got_picture;
- int ret = 0;
+ int got_packet;
+ int ret;
ret = video_input_init(&input_ctx, input_format_string, input_path, input_options);
if (ret < 0) {
}
/* allocate an input frame */
- picture_raw = avcodec_alloc_frame();
+ picture_raw = av_frame_alloc();
if (picture_raw == NULL) {
- fprintf(stderr, "cannot allocate the raw picture frame!");
+ fprintf(stderr, "cannot allocate the raw picture frame!\n");
+ ret = -ENOMEM;
goto cleanup_output;
}
/* allocate output frame */
- picture_scaled = avcodec_alloc_frame();
+ picture_scaled = av_frame_alloc();
if (picture_scaled == NULL) {
fprintf(stderr, "cannot allocate the scaled picture!\n");
+ ret = -ENOMEM;
goto cleanup_picture_raw;
}
+ picture_scaled->format = (output_ctx.codec_ctx)->pix_fmt;
+ picture_scaled->width = (output_ctx.codec_ctx)->width;
+ picture_scaled->height = (output_ctx.codec_ctx)->height;
/* calculate the bytes needed for the output image and create buffer for the output image */
- out_buf_size = avpicture_get_size((output_ctx.codec_ctx)->pix_fmt,
- (output_ctx.codec_ctx)->width,
- (output_ctx.codec_ctx)->height);
+ out_buf_size = av_image_get_buffer_size((output_ctx.codec_ctx)->pix_fmt,
+ (output_ctx.codec_ctx)->width,
+ (output_ctx.codec_ctx)->height,
+ 1);
out_buf = av_malloc(out_buf_size * sizeof(uint8_t));
if (out_buf == NULL) {
fprintf(stderr, "cannot allocate output data buffer!\n");
+ ret = -ENOMEM;
goto cleanup_picture_scaled;
}
/* assign appropriate parts of buffer to image planes in picture_scaled */
- avpicture_fill((AVPicture *)picture_scaled,
- out_buf,
- (output_ctx.codec_ctx)->pix_fmt,
- (output_ctx.codec_ctx)->width,
- (output_ctx.codec_ctx)->height);
+ av_image_fill_arrays(picture_scaled->data,
+ picture_scaled->linesize,
+ out_buf,
+ (output_ctx.codec_ctx)->pix_fmt,
+ (output_ctx.codec_ctx)->width,
+ (output_ctx.codec_ctx)->height,
+ 1);
sw_scale_ctx = sws_getCachedContext(NULL,
(input_ctx.codec_ctx)->width,
NULL, NULL, NULL);
if (sw_scale_ctx == NULL) {
fprintf(stderr, "cannot set up the rescaling context!\n");
+ ret = -EINVAL;
goto cleanup_out_buf;
}
+ got_packet = 0;
while (run) {
/* read packet */
- ret = av_read_frame(input_ctx.format_ctx, &packet);
+ ret = av_read_frame(input_ctx.format_ctx, &in_packet);
if (ret < 0) {
if (ret == (int)AVERROR_EOF || input_ctx.format_ctx->pb->eof_reached)
ret = 0;
goto end_while;
}
- if (packet.stream_index != input_ctx.video_stream_index) {
+ if (in_packet.stream_index != input_ctx.video_stream_index) {
/* that is more or less a "continue", but there is
* still the packet to free */
goto end_while;
/* decode */
got_picture = 0;
- ret = avcodec_decode_video2(input_ctx.codec_ctx, picture_raw, &got_picture, &packet);
+ ret = avcodec_decode_video2(input_ctx.codec_ctx, picture_raw, &got_picture, &in_packet);
if (ret < 0) {
fprintf(stderr, "cannot decode video\n");
run = 0;
goto end_while;
}
- /* if we get the complete frame */
+ /* if we got the complete frame */
if (got_picture) {
- /* convert it to YUV */
+ /*
+ * Rescaling the picture also changes its pixel format
+ * to the raw format supported by the projector if
+ * this was set in video_output_init()
+ */
sws_scale(sw_scale_ctx,
- (const uint8_t * const*)picture_raw->data,
+ (const uint8_t * const *)picture_raw->data,
picture_raw->linesize,
0,
(input_ctx.codec_ctx)->height,
picture_scaled->linesize);
if (output_ctx.raw_output) {
+ out_picture = out_buf;
out_picture_size = out_buf_size;
} else {
picture_scaled->quality = (output_ctx.codec_ctx)->global_quality;
- /* TODO: switch to avcodec_encode_video2() eventually */
- out_picture_size = avcodec_encode_video(output_ctx.codec_ctx,
- out_buf,
- out_buf_size,
- picture_scaled);
- if (out_picture_size < 0) {
+ av_init_packet(&out_packet);
+ out_packet.data = NULL;
+ out_packet.size = 0;
+ got_packet = 0;
+ ret = avcodec_encode_video2(output_ctx.codec_ctx,
+ &out_packet,
+ picture_scaled,
+ &got_packet);
+ if (ret < 0 || !got_packet) {
fprintf(stderr, "cannot encode video\n");
- ret = out_picture_size;
run = 0;
goto end_while;
}
+
+ out_picture = out_packet.data;
+ out_picture_size = out_packet.size;
}
#ifdef DEBUG
- char filename[NAME_MAX];
- FILE *file;
- if (!output_ctx.raw_output)
- snprintf(filename, NAME_MAX, "out_q%03d.jpg", quality);
- else
- snprintf(filename, NAME_MAX, "out.raw");
- file = fopen(filename, "wb");
- fwrite(out_buf, 1, out_picture_size, file);
- fclose(file);
+ if (dump_frame) {
+ char filename[NAME_MAX];
+ FILE *file;
+ if (!output_ctx.raw_output)
+ snprintf(filename, NAME_MAX, "out_q%03d.jpg", quality);
+ else
+ snprintf(filename, NAME_MAX, "out.raw");
+ file = fopen(filename, "wb");
+ fwrite(out_picture, 1, out_picture_size, file);
+ fclose(file);
+ }
+#else
+ (void) dump_frame;
#endif
- ret = am7xxx_send_image(dev,
- image_format,
- (output_ctx.codec_ctx)->width,
- (output_ctx.codec_ctx)->height,
- out_buf,
- out_picture_size);
+ ret = am7xxx_send_image_async(dev,
+ image_format,
+ (output_ctx.codec_ctx)->width,
+ (output_ctx.codec_ctx)->height,
+ out_picture,
+ out_picture_size);
if (ret < 0) {
- perror("am7xxx_send_image");
+ perror("am7xxx_send_image_async");
run = 0;
goto end_while;
}
}
end_while:
- av_free_packet(&packet);
+ if (!output_ctx.raw_output && got_packet)
+ av_packet_unref(&out_packet);
+ av_packet_unref(&in_packet);
}
sws_freeContext(sw_scale_ctx);
cleanup_out_buf:
av_free(out_buf);
cleanup_picture_scaled:
- av_free(picture_scaled);
+ av_frame_free(&picture_scaled);
cleanup_picture_raw:
- av_free(picture_raw);
+ av_frame_free(&picture_raw);
cleanup_output:
/* av_free is needed as well,
static char *get_x_screen_size(const char *input_path)
{
(void) input_path;
- fprintf(stderr, "%s: fallback implementation\n", __func__);
+ fprintf(stderr, "%s: fallback implementation, assuming a vga screen\n", __func__);
return strdup("vga");
}
#endif
run = 0;
}
+#ifdef HAVE_SIGACTION
static int set_signal_handler(void (*signal_handler)(int))
{
struct sigaction new_action;
struct sigaction old_action;
- int ret = 0;
+ int ret;
new_action.sa_handler = signal_handler;
sigemptyset(&new_action.sa_mask);
out:
return ret;
}
+#else
+static int set_signal_handler(void (*signal_handler)(int))
+{
+ (void)signal_handler;
+ fprintf(stderr, "set_signal_handler() not implemented, sigaction not available\n");
+ return 0;
+}
+#endif
+
static void usage(char *name)
{
printf("usage: %s [OPTIONS]\n\n", name);
printf("OPTIONS:\n");
+ printf("\t-d <index>\t\tthe device index (default is 0)\n");
+#ifdef DEBUG
+ printf("\t-D \t\t\tdump the last frame to a file (only active in DEBUG mode)\n");
+#endif
printf("\t-f <input format>\tthe input device format\n");
printf("\t-i <input path>\t\tthe input path\n");
printf("\t-o <options>\t\ta comma separated list of input format options\n");
printf("\t\t\t\t\t1 - JPEG\n");
printf("\t\t\t\t\t2 - NV12\n");
printf("\t-q <quality>\t\tquality of jpeg sent to the device, between 1 and 100\n");
- printf("\t-p <power level>\tpower level of device, between %x (off) and %x (maximum)\n", AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
- printf("\t\t\t\tWARNING: Level 2 and greater require the master AND\n\t\t\t\t\t the slave connector to be plugged in.\n");
+ printf("\t-l <log level>\t\tthe verbosity level of libam7xxx output (0-5)\n");
+ printf("\t-p <power mode>\t\tthe power mode of device, between %d (off) and %d (turbo)\n",
+ AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
+ printf("\t\t\t\tWARNING: Level 2 and greater require the master AND\n");
+ printf("\t\t\t\t the slave connector to be plugged in.\n");
+ printf("\t-z <zoom mode>\t\tthe display zoom mode, between %d (original) and %d (tele)\n",
+ AM7XXX_ZOOM_ORIGINAL, AM7XXX_ZOOM_TELE);
printf("\t-h \t\t\tthis help message\n");
printf("\n\nEXAMPLES OF USE:\n");
printf("\t%s -f x11grab -i :0.0 -o video_size=800x480\n", name);
unsigned int rescale_method = SWS_BICUBIC;
unsigned int upscale = 0;
unsigned int quality = 95;
- am7xxx_power_mode power_mode = AM7XXX_POWER_LOW;
+ int log_level = AM7XXX_LOG_INFO;
+ int device_index = 0;
+ int power_mode = AM7XXX_POWER_LOW;
+ int zoom = AM7XXX_ZOOM_ORIGINAL;
int format = AM7XXX_IMAGE_FORMAT_JPEG;
am7xxx_context *ctx;
am7xxx_device *dev;
+ int dump_frame = 0;
- while ((opt = getopt(argc, argv, "f:i:o:s:uF:q:hp:")) != -1) {
+ while ((opt = getopt(argc, argv, "d:Df:i:o:s:uF:q:l:p:z:h")) != -1) {
switch (opt) {
+ case 'd':
+ device_index = atoi(optarg);
+ if (device_index < 0) {
+ fprintf(stderr, "Unsupported device index\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case 'D':
+ dump_frame = 1;
+#ifndef DEBUG
+ fprintf(stderr, "Warning: the -D option is only active in DEBUG mode.\n");
+#endif
+ break;
case 'f':
input_format_string = strdup(optarg);
break;
input_path = strdup(optarg);
break;
case 'o':
+#ifdef HAVE_STRTOK_R
/*
* parse suboptions, the expected format is something
* like:
* draw_mouse=1,framerate=100,video_size=800x480
*/
subopts = subopts_saved = strdup(optarg);
- while((subopt = strtok_r(subopts, ",", &subopts))) {
+ while ((subopt = strtok_r(subopts, ",", &subopts))) {
char *subopt_name = strtok_r(subopt, "=", &subopt);
char *subopt_value = strtok_r(NULL, "", &subopt);
if (subopt_value == NULL) {
av_dict_set(&options, subopt_name, subopt_value, 0);
}
free(subopts_saved);
+#else
+ fprintf(stderr, "Option '-o' not implemented\n");
+#endif
break;
case 's':
rescale_method = atoi(optarg);
- switch(format) {
+ switch(rescale_method) {
case SWS_FAST_BILINEAR:
case SWS_BILINEAR:
case SWS_BICUBIC:
default:
fprintf(stderr, "Unsupported rescale method\n");
ret = -EINVAL;
- goto out;;
+ goto out;
}
break;
case 'u':
default:
fprintf(stderr, "Unsupported format\n");
ret = -EINVAL;
- goto out;;
+ goto out;
}
break;
case 'q':
if (quality < 1 || quality > 100) {
fprintf(stderr, "Invalid quality value, must be between 1 and 100\n");
ret = -EINVAL;
- goto out;;
+ goto out;
}
break;
- case 'h':
- usage(argv[0]);
- ret = 0;
- goto out;
+ case 'l':
+ log_level = atoi(optarg);
+ if (log_level < AM7XXX_LOG_FATAL || log_level > AM7XXX_LOG_TRACE) {
+ fprintf(stderr, "Unsupported log level, falling back to AM7XXX_LOG_ERROR\n");
+ log_level = AM7XXX_LOG_ERROR;
+ }
break;
case 'p':
power_mode = atoi(optarg);
case AM7XXX_POWER_MIDDLE:
case AM7XXX_POWER_HIGH:
case AM7XXX_POWER_TURBO:
- fprintf(stdout, "Power mode: %x\n", power_mode);
+ fprintf(stdout, "Power mode: %d\n", power_mode);
break;
default:
- fprintf(stderr, "Invalid power mode value, must be between %x and %x\n", AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
+ fprintf(stderr, "Invalid power mode value, must be between %d and %d\n",
+ AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
ret = -EINVAL;
goto out;
}
break;
+ case 'z':
+ zoom = atoi(optarg);
+ switch(zoom) {
+ case AM7XXX_ZOOM_ORIGINAL:
+ case AM7XXX_ZOOM_H:
+ case AM7XXX_ZOOM_H_V:
+ case AM7XXX_ZOOM_TEST:
+ case AM7XXX_ZOOM_TELE:
+ fprintf(stdout, "Zoom: %d\n", zoom);
+ break;
+ default:
+ fprintf(stderr, "Invalid zoom mode value, must be between %d and %d\n",
+ AM7XXX_ZOOM_ORIGINAL, AM7XXX_ZOOM_TELE);
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case 'h':
+ usage(argv[0]);
+ ret = 0;
+ goto out;
default: /* '?' */
usage(argv[0]);
ret = -EINVAL;
}
if (input_path == NULL) {
- fprintf(stderr, "The -i option must always be passed\n");
+ fprintf(stderr, "The -i option must always be passed\n\n");
+ usage(argv[0]);
ret = -EINVAL;
goto out;
}
goto out;
}
- ret = am7xxx_open_device(ctx, &dev, 0);
+ am7xxx_set_log_level(ctx, log_level);
+
+ ret = am7xxx_open_device(ctx, &dev, device_index);
if (ret < 0) {
perror("am7xxx_open_device");
goto cleanup;
}
+ ret = am7xxx_set_zoom_mode(dev, zoom);
+ if (ret < 0) {
+ perror("am7xxx_set_zoom_mode");
+ goto cleanup;
+ }
+
ret = am7xxx_set_power_mode(dev, power_mode);
if (ret < 0) {
perror("am7xxx_set_power_mode");
goto cleanup;
}
+ /* When setting AM7XXX_ZOOM_TEST don't display the actual image */
+ if (zoom == AM7XXX_ZOOM_TEST)
+ goto cleanup;
+
ret = am7xxx_play(input_format_string,
&options,
input_path,
upscale,
quality,
format,
- dev);
+ dev,
+ dump_frame);
if (ret < 0) {
fprintf(stderr, "am7xxx_play failed\n");
goto cleanup;