X-Git-Url: https://git.ao2.it/libam7xxx.git/blobdiff_plain/e73c33d0a4c2f4695f58a19b2a0a06af3b0353bd..refs/heads/master:/examples/am7xxx-play.c?ds=sidebyside

diff --git a/examples/am7xxx-play.c b/examples/am7xxx-play.c
index 531c156..81aff84 100644
--- a/examples/am7xxx-play.c
+++ b/examples/am7xxx-play.c
@@ -1,7 +1,7 @@
 /*
  * am7xxx-play - play stuff on an am7xxx device (e.g. Acer C110, PicoPix 1020)
  *
- * Copyright (C) 2012  Antonio Ospite
+ * Copyright (C) 2012-2014  Antonio Ospite
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,6 +17,12 @@
  * along with this program.  If not, see .
  */
 
+/**
+ * @example examples/am7xxx-play.c
+ * am7xxx-play uses libavdevice, libavformat, libavcodec and libswscale to
+ * decode the input, encode it to jpeg and display it with libam7xxx.
+ */
+
 #include
 #include
 #include
@@ -25,6 +31,7 @@
 #include
 #include
 
+#include
 #include
 #include
 
@@ -44,10 +51,10 @@ static int video_input_init(struct video_input_ctx *input_ctx,
 {
 	AVInputFormat *input_format = NULL;
 	AVFormatContext *input_format_ctx;
+	AVCodecParameters *input_codec_params;
 	AVCodecContext *input_codec_ctx;
 	AVCodec *input_codec;
 	int video_index;
-	unsigned int i;
 	int ret;
 
 	avdevice_register_all();
@@ -84,7 +91,7 @@
 	/* get information on the input stream (e.g. format, bitrate, framerate) */
 	ret = avformat_find_stream_info(input_format_ctx, NULL);
 	if (ret < 0) {
-		fprintf(stderr, "cannot get information on the stream");
+		fprintf(stderr, "cannot get information on the stream\n");
 		goto cleanup;
 	}
 
@@ -92,40 +99,32 @@
 	av_dump_format(input_format_ctx, 0, input_path, 0);
 
 	/* look for the first video_stream */
-	video_index = -1;
-	for (i = 0; i < input_format_ctx->nb_streams; i++)
-		if (input_format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-			video_index = i;
-			break;
-		}
-	if (video_index == -1) {
+	video_index = av_find_best_stream(input_format_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &input_codec, 0);
+	if (video_index < 0) {
 		fprintf(stderr, "cannot find any video streams\n");
-		ret = -ENOTSUP;
+		ret = -EINVAL;
 		goto cleanup;
 	}
 
-	/* get a pointer to the codec context for the video stream */
-	input_codec_ctx = input_format_ctx->streams[video_index]->codec;
+	input_codec_ctx = avcodec_alloc_context3(input_codec);
 	if (input_codec_ctx == NULL) {
-		fprintf(stderr, "input codec context is not valid\n");
-		ret = -ENOTSUP;
+		fprintf(stderr, "failed to allocate the input codec context\n");
+		ret = -ENOMEM;
 		goto cleanup;
 	}
 
-	/* find the decoder for the video stream */
-	input_codec = avcodec_find_decoder(input_codec_ctx->codec_id);
-	if (input_codec == NULL) {
-		fprintf(stderr, "input_codec is NULL!");
-		ret = -ENOTSUP;
-		goto cleanup;
+	input_codec_params = input_format_ctx->streams[video_index]->codecpar;
+	ret = avcodec_parameters_to_context(input_codec_ctx, input_codec_params);
+	if (ret < 0) {
+		fprintf(stderr, "cannot copy parameters to input codec context\n");
+		goto cleanup_ctx;
 	}
 
 	/* open the decoder */
 	ret = avcodec_open2(input_codec_ctx, input_codec, NULL);
 	if (ret < 0) {
 		fprintf(stderr, "cannot open input codec\n");
-		ret = -ENOTSUP;
-		goto cleanup;
+		goto cleanup_ctx;
 	}
 
 	input_ctx->format_ctx = input_format_ctx;
@@ -135,6 +134,8 @@
 	ret = 0;
 	goto out;
 
+cleanup_ctx:
+	avcodec_free_context(&input_codec_ctx);
 cleanup:
 	avformat_close_input(&input_format_ctx);
 out:
@@ -163,7 +164,7 @@ static int video_output_init(struct video_output_ctx *output_ctx,
 	int ret;
 
 	if (input_ctx == NULL) {
-		fprintf(stderr, "input_ctx must not be NULL!");
+		fprintf(stderr, "input_ctx must not be NULL!\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -171,12 +172,12 @@ static int video_output_init(struct video_output_ctx *output_ctx,
 	/* create the encoder context */
 	output_codec_ctx = avcodec_alloc_context3(NULL);
 	if (output_codec_ctx == NULL) {
-		fprintf(stderr, "cannot allocate output codec context!");
+		fprintf(stderr, "cannot allocate output codec context!\n");
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	/* Calculate the new output dimension so the original picture is shown
+	/* Calculate the new output dimension so the original frame is shown
 	 * in its entirety */
 	ret = am7xxx_calc_scaled_image_dimensions(dev, upscale,
@@ -193,23 +194,26 @@ static int video_output_init(struct video_output_ctx *output_ctx,
 	output_codec_ctx->bit_rate = (input_ctx->codec_ctx)->bit_rate;
 	output_codec_ctx->width = new_output_width;
 	output_codec_ctx->height = new_output_height;
-	output_codec_ctx->time_base.num = (input_ctx->codec_ctx)->time_base.num;
-	output_codec_ctx->time_base.den = (input_ctx->codec_ctx)->time_base.den;
+	output_codec_ctx->time_base.num =
+		(input_ctx->format_ctx)->streams[input_ctx->video_stream_index]->time_base.num;
+	output_codec_ctx->time_base.den =
+		(input_ctx->format_ctx)->streams[input_ctx->video_stream_index]->time_base.den;
 
 	/* When the raw format is requested we don't actually need to setup
 	 * and open a decoder
 	 */
 	if (image_format == AM7XXX_IMAGE_FORMAT_NV12) {
 		fprintf(stdout, "using raw output format\n");
-		output_codec_ctx->pix_fmt = PIX_FMT_NV12;
+		output_codec_ctx->pix_fmt = AV_PIX_FMT_NV12;
 		output_ctx->codec_ctx = output_codec_ctx;
 		output_ctx->raw_output = 1;
 		ret = 0;
 		goto out;
 	}
 
-	output_codec_ctx->pix_fmt = PIX_FMT_YUVJ420P;
-	output_codec_ctx->codec_id = CODEC_ID_MJPEG;
+	/* YUVJ420P is deprecated in swscaler, but mjpeg still relies on it. */
+	output_codec_ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
+	output_codec_ctx->codec_id = AV_CODEC_ID_MJPEG;
 	output_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
 
 	/* Set quality and other VBR settings */
@@ -220,16 +224,16 @@
 	 * in particular they won't be 0, this is needed because they are used
 	 * as divisor somewhere in the encoding process */
 	output_codec_ctx->qmin = output_codec_ctx->qmax = ((100 - (quality - 1)) * FF_QUALITY_SCALE) / 100;
-	output_codec_ctx->mb_lmin = output_codec_ctx->lmin = output_codec_ctx->qmin * FF_QP2LAMBDA;
-	output_codec_ctx->mb_lmax = output_codec_ctx->lmax = output_codec_ctx->qmax * FF_QP2LAMBDA;
-	output_codec_ctx->flags |= CODEC_FLAG_QSCALE;
+	output_codec_ctx->mb_lmin = output_codec_ctx->qmin * FF_QP2LAMBDA;
+	output_codec_ctx->mb_lmax = output_codec_ctx->qmax * FF_QP2LAMBDA;
+	output_codec_ctx->flags |= AV_CODEC_FLAG_QSCALE;
 	output_codec_ctx->global_quality = output_codec_ctx->qmin * FF_QP2LAMBDA;
 
 	/* find the encoder */
 	output_codec = avcodec_find_encoder(output_codec_ctx->codec_id);
 	if (output_codec == NULL) {
 		fprintf(stderr, "cannot find output codec!\n");
-		ret = -ENOTSUP;
+		ret = -EINVAL;
 		goto cleanup;
 	}
 
@@ -247,13 +251,73 @@ static int video_output_init(struct video_output_ctx *output_ctx,
 	goto out;
 
 cleanup:
-	avcodec_close(output_codec_ctx);
-	av_free(output_codec_ctx);
+	avcodec_free_context(&output_codec_ctx);
 out:
 	return ret;
 }
 
+/*
+ * Wrap the new avcodec API from FFMpeg 3.1 to minimize the changes in the
+ * user code.
+ *
+ * If the use of the wrappers were to be made conditional, a check like the
+ * following could be used:
+ *
+ * #if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101))
+ *
+ * As derived from the APIchanges document:
+ * https://github.com/FFmpeg/FFmpeg/blob/master/doc/APIchanges
+ *
+ * The wrapper implementation has been taken from:
+ * https://blogs.gentoo.org/lu_zero/2016/03/29/new-avcodec-api/
+ */
+static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
+{
+	int ret;
+
+	*got_frame = 0;
+
+	if (pkt) {
+		ret = avcodec_send_packet(avctx, pkt);
+		/*
+		 * In particular, we don't expect AVERROR(EAGAIN), because we
+		 * read all decoded frames with avcodec_receive_frame() until
+		 * done.
+		 */
+		if (ret < 0)
+			return ret == AVERROR_EOF ? 0 : ret;
+	}
+
+	ret = avcodec_receive_frame(avctx, frame);
+	if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+		return ret;
+	if (ret >= 0)
+		*got_frame = 1;
+
+	return 0;
+}
+
+static int encode(AVCodecContext *avctx, AVPacket *pkt, int *got_packet, AVFrame *frame)
+{
+	int ret;
+
+	*got_packet = 0;
+
+	ret = avcodec_send_frame(avctx, frame);
+	if (ret < 0)
+		return ret;
+
+	ret = avcodec_receive_packet(avctx, pkt);
+	if (!ret)
+		*got_packet = 1;
+	if (ret == AVERROR(EAGAIN))
+		return 0;
+
+	return ret;
+}
+
+
 static int am7xxx_play(const char *input_format_string,
 		       AVDictionary **input_options,
 		       const char *input_path,
@@ -261,19 +325,23 @@ static int am7xxx_play(const char *input_format_string,
 		       unsigned int upscale,
 		       unsigned int quality,
 		       am7xxx_image_format image_format,
-		       am7xxx_device *dev)
+		       am7xxx_device *dev,
+		       int dump_frame)
 {
 	struct video_input_ctx input_ctx;
 	struct video_output_ctx output_ctx;
-	AVFrame *picture_raw;
-	AVFrame *picture_scaled;
+	AVFrame *frame_raw;
+	AVFrame *frame_scaled;
 	int out_buf_size;
 	uint8_t *out_buf;
-	int out_picture_size;
+	int out_frame_size;
+	uint8_t *out_frame;
 	struct SwsContext *sw_scale_ctx;
-	AVPacket packet;
-	int got_picture;
-	int ret = 0;
+	AVPacket in_packet;
+	AVPacket out_packet;
+	int got_frame;
+	int got_packet;
+	int ret;
 
 	ret = video_input_init(&input_ctx, input_format_string, input_path, input_options);
 	if (ret < 0) {
@@ -288,35 +356,44 @@ static int am7xxx_play(const char *input_format_string,
 	}
 
 	/* allocate an input frame */
-	picture_raw = avcodec_alloc_frame();
-	if (picture_raw == NULL) {
-		fprintf(stderr, "cannot allocate the raw picture frame!");
+	frame_raw = av_frame_alloc();
+	if (frame_raw == NULL) {
+		fprintf(stderr, "cannot allocate the raw frame!\n");
+		ret = -ENOMEM;
 		goto cleanup_output;
 	}
 
 	/* allocate output frame */
-	picture_scaled = avcodec_alloc_frame();
-	if (picture_scaled == NULL) {
-		fprintf(stderr, "cannot allocate the scaled picture!\n");
-		goto cleanup_picture_raw;
+	frame_scaled = av_frame_alloc();
+	if (frame_scaled == NULL) {
+		fprintf(stderr, "cannot allocate the scaled frame!\n");
+		ret = -ENOMEM;
+		goto cleanup_frame_raw;
 	}
+	frame_scaled->format = (output_ctx.codec_ctx)->pix_fmt;
+	frame_scaled->width = (output_ctx.codec_ctx)->width;
+	frame_scaled->height = (output_ctx.codec_ctx)->height;
 
 	/* calculate the bytes needed for the output image and create buffer for the output image */
-	out_buf_size = avpicture_get_size((output_ctx.codec_ctx)->pix_fmt,
-					  (output_ctx.codec_ctx)->width,
-					  (output_ctx.codec_ctx)->height);
+	out_buf_size = av_image_get_buffer_size((output_ctx.codec_ctx)->pix_fmt,
+						(output_ctx.codec_ctx)->width,
+						(output_ctx.codec_ctx)->height,
+						1);
 	out_buf = av_malloc(out_buf_size * sizeof(uint8_t));
 	if (out_buf == NULL) {
 		fprintf(stderr, "cannot allocate output data buffer!\n");
-		goto cleanup_picture_scaled;
+		ret = -ENOMEM;
+		goto cleanup_frame_scaled;
 	}
 
-	/* assign appropriate parts of buffer to image planes in picture_scaled */
-	avpicture_fill((AVPicture *)picture_scaled,
-		       out_buf,
-		       (output_ctx.codec_ctx)->pix_fmt,
-		       (output_ctx.codec_ctx)->width,
-		       (output_ctx.codec_ctx)->height);
+	/* assign appropriate parts of buffer to image planes in frame_scaled */
+	av_image_fill_arrays(frame_scaled->data,
+			     frame_scaled->linesize,
+			     out_buf,
+			     (output_ctx.codec_ctx)->pix_fmt,
+			     (output_ctx.codec_ctx)->width,
+			     (output_ctx.codec_ctx)->height,
+			     1);
 
 	sw_scale_ctx = sws_getCachedContext(NULL,
 					    (input_ctx.codec_ctx)->width,
@@ -329,12 +406,14 @@ static int am7xxx_play(const char *input_format_string,
 					    NULL, NULL, NULL);
 	if (sw_scale_ctx == NULL) {
 		fprintf(stderr, "cannot set up the rescaling context!\n");
+		ret = -EINVAL;
 		goto cleanup_out_buf;
 	}
 
+	got_packet = 0;
 	while (run) {
 		/* read packet */
-		ret = av_read_frame(input_ctx.format_ctx, &packet);
+		ret = av_read_frame(input_ctx.format_ctx, &in_packet);
 		if (ret < 0) {
 			if (ret == (int)AVERROR_EOF || input_ctx.format_ctx->pb->eof_reached)
 				ret = 0;
@@ -344,94 +423,111 @@ static int am7xxx_play(const char *input_format_string,
 			goto end_while;
 		}
 
-		if (packet.stream_index != input_ctx.video_stream_index) {
+		if (in_packet.stream_index != input_ctx.video_stream_index) {
 			/* that is more or less a "continue", but there is
 			 * still the packet to free */
 			goto end_while;
 		}
 
 		/* decode */
-		got_picture = 0;
-		ret = avcodec_decode_video2(input_ctx.codec_ctx, picture_raw, &got_picture, &packet);
+		got_frame = 0;
+		ret = decode(input_ctx.codec_ctx, frame_raw, &got_frame, &in_packet);
 		if (ret < 0) {
 			fprintf(stderr, "cannot decode video\n");
 			run = 0;
 			goto end_while;
 		}
 
-		/* if we get the complete frame */
-		if (got_picture) {
-			/* convert it to YUV */
+		/* if we got the complete frame */
+		if (got_frame) {
+			/*
+			 * Rescaling the frame also changes its pixel format
+			 * to the raw format supported by the projector if
+			 * this was set in video_output_init()
+			 */
 			sws_scale(sw_scale_ctx,
-				  (const uint8_t * const*)picture_raw->data,
-				  picture_raw->linesize,
+				  (const uint8_t * const *)frame_raw->data,
+				  frame_raw->linesize,
 				  0,
 				  (input_ctx.codec_ctx)->height,
-				  picture_scaled->data,
-				  picture_scaled->linesize);
+				  frame_scaled->data,
+				  frame_scaled->linesize);
 
 			if (output_ctx.raw_output) {
-				out_picture_size = out_buf_size;
+				out_frame = out_buf;
+				out_frame_size = out_buf_size;
 			} else {
-				picture_scaled->quality = (output_ctx.codec_ctx)->global_quality;
-				/* TODO: switch to avcodec_encode_video2() eventually */
-				out_picture_size = avcodec_encode_video(output_ctx.codec_ctx,
-									out_buf,
-									out_buf_size,
-									picture_scaled);
-				if (out_picture_size < 0) {
+				frame_scaled->quality = (output_ctx.codec_ctx)->global_quality;
+				av_init_packet(&out_packet);
+				out_packet.data = NULL;
+				out_packet.size = 0;
+				got_packet = 0;
+				ret = encode(output_ctx.codec_ctx,
+					     &out_packet,
+					     &got_packet,
+					     frame_scaled);
+				if (ret < 0 || !got_packet) {
					fprintf(stderr, "cannot encode video\n");
-					ret = out_picture_size;
 					run = 0;
 					goto end_while;
 				}
+
+				out_frame = out_packet.data;
+				out_frame_size = out_packet.size;
 			}
 
 #ifdef DEBUG
-			char filename[NAME_MAX];
-			FILE *file;
-			if (!output_ctx.raw_output)
-				snprintf(filename, NAME_MAX, "out_q%03d.jpg", quality);
-			else
-				snprintf(filename, NAME_MAX, "out.raw");
-			file = fopen(filename, "wb");
-			fwrite(out_buf, 1, out_picture_size, file);
-			fclose(file);
+			if (dump_frame) {
+				char filename[NAME_MAX];
+				FILE *file;
+				if (!output_ctx.raw_output)
+					snprintf(filename, NAME_MAX, "out_q%03d.jpg", quality);
+				else
+					snprintf(filename, NAME_MAX, "out.raw");
+				file = fopen(filename, "wb");
+				fwrite(out_frame, 1, out_frame_size, file);
+				fclose(file);
+			}
+#else
+			(void) dump_frame;
 #endif
 
-			ret = am7xxx_send_image(dev,
-						image_format,
-						(output_ctx.codec_ctx)->width,
-						(output_ctx.codec_ctx)->height,
-						out_buf,
-						out_picture_size);
+			ret = am7xxx_send_image_async(dev,
+						      image_format,
+						      (output_ctx.codec_ctx)->width,
+						      (output_ctx.codec_ctx)->height,
+						      out_frame,
+						      out_frame_size);
 			if (ret < 0) {
-				perror("am7xxx_send_image");
+				perror("am7xxx_send_image_async");
 				run = 0;
 				goto end_while;
 			}
 		}
end_while:
-		av_free_packet(&packet);
+		if (!output_ctx.raw_output && got_packet)
+			av_packet_unref(&out_packet);
+		av_packet_unref(&in_packet);
 	}
 
 	sws_freeContext(sw_scale_ctx);
cleanup_out_buf:
 	av_free(out_buf);
-cleanup_picture_scaled:
-	av_free(picture_scaled);
-cleanup_picture_raw:
-	av_free(picture_raw);
+cleanup_frame_scaled:
+	av_frame_free(&frame_scaled);
+cleanup_frame_raw:
+	av_frame_free(&frame_raw);
cleanup_output:
-	/* av_free is needed as well,
-	 * see http://libav.org/doxygen/master/avcodec_8h.html#a5d7440cd7ea195bd0b14f21a00ef36dd
+	/* Freeing the codec context is needed as well,
+	 * see https://libav.org/documentation/doxygen/master/group__lavc__core.html#gaf4daa92361efb3523ef5afeb0b54077f
 	 */
 	avcodec_close(output_ctx.codec_ctx);
-	av_free(output_ctx.codec_ctx);
+	avcodec_free_context(&(output_ctx.codec_ctx));
 
cleanup_input:
 	avcodec_close(input_ctx.codec_ctx);
+	avcodec_free_context(&(input_ctx.codec_ctx));
 	avformat_close_input(&(input_ctx.format_ctx));
 
out:
@@ -509,7 +605,7 @@ static char *get_x_screen_size(const char *input_path)
 static char *get_x_screen_size(const char *input_path)
 {
 	(void) input_path;
-	fprintf(stderr, "%s: fallback implementation\n", __func__);
+	fprintf(stderr, "%s: fallback implementation, assuming a vga screen\n", __func__);
 	return strdup("vga");
 }
 #endif
@@ -520,11 +616,12 @@ static void unset_run(int signo)
 	run = 0;
 }
 
+#ifdef HAVE_SIGACTION
 static int set_signal_handler(void (*signal_handler)(int))
 {
 	struct sigaction new_action;
 	struct sigaction old_action;
-	int ret = 0;
+	int ret;
 
 	new_action.sa_handler = signal_handler;
 	sigemptyset(&new_action.sa_mask);
@@ -547,25 +644,43 @@ static int set_signal_handler(void (*signal_handler)(int))
out:
 	return ret;
 }
+#else
+static int set_signal_handler(void (*signal_handler)(int))
+{
+	(void)signal_handler;
+	fprintf(stderr, "set_signal_handler() not implemented, sigaction not available\n");
+	return 0;
+}
+#endif
+
 
 static void usage(char *name)
 {
 	printf("usage: %s [OPTIONS]\n\n", name);
 	printf("OPTIONS:\n");
+	printf("\t-d \t\tthe device index (default is 0)\n");
+#ifdef DEBUG
+	printf("\t-D \t\t\tdump the last frame to a file (only active in DEBUG mode)\n");
+#endif
 	printf("\t-f \tthe input device format\n");
 	printf("\t-i \t\tthe input path\n");
 	printf("\t-o \t\ta comma separated list of input format options\n");
 	printf("\t\t\t\tEXAMPLE:\n");
 	printf("\t\t\t\t\t-o draw_mouse=1,framerate=100,video_size=800x480\n");
-	printf("\t-s \t\tthe rescaling method (see swscale.h)\n");
+	printf("\t-s \tthe rescaling method (see swscale.h)\n");
 	printf("\t-u \t\t\tupscale the image if smaller than the display dimensions\n");
 	printf("\t-F \t\tthe image format to use (default is JPEG)\n");
 	printf("\t\t\t\tSUPPORTED FORMATS:\n");
 	printf("\t\t\t\t\t1 - JPEG\n");
 	printf("\t\t\t\t\t2 - NV12\n");
 	printf("\t-q \t\tquality of jpeg sent to the device, between 1 and 100\n");
-	printf("\t-p \tpower level of device, between %x (off) and %x (maximum)\n", AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
-	printf("\t\t\t\tWARNING: Level 2 and greater require the master AND\n\t\t\t\t\t the slave connector to be plugged in.\n");
+	printf("\t-l \t\tthe verbosity level of libam7xxx output (0-5)\n");
+	printf("\t-p \t\tthe power mode of device, between %d (off) and %d (turbo)\n",
+	       AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
+	printf("\t\t\t\tWARNING: Level 2 and greater require the master AND\n");
+	printf("\t\t\t\t the slave connector to be plugged in.\n");
+	printf("\t-z \t\tthe display zoom mode, between %d (original) and %d (tele)\n",
+	       AM7XXX_ZOOM_ORIGINAL, AM7XXX_ZOOM_TELE);
 	printf("\t-h \t\t\tthis help message\n");
 	printf("\n\nEXAMPLES OF USE:\n");
 	printf("\t%s -f x11grab -i :0.0 -o video_size=800x480\n", name);
@@ -587,13 +702,31 @@ int main(int argc, char *argv[])
 	unsigned int rescale_method = SWS_BICUBIC;
 	unsigned int upscale = 0;
 	unsigned int quality = 95;
-	am7xxx_power_mode power_mode = AM7XXX_POWER_LOW;
+	int log_level = AM7XXX_LOG_INFO;
+	int device_index = 0;
+	int power_mode = AM7XXX_POWER_LOW;
+	int zoom = AM7XXX_ZOOM_ORIGINAL;
 	int format = AM7XXX_IMAGE_FORMAT_JPEG;
 	am7xxx_context *ctx;
 	am7xxx_device *dev;
+	int dump_frame = 0;
 
-	while ((opt = getopt(argc, argv, "f:i:o:s:uF:q:hp:")) != -1) {
+	while ((opt = getopt(argc, argv, "d:Df:i:o:s:uF:q:l:p:z:h")) != -1) {
 		switch (opt) {
+		case 'd':
+			device_index = atoi(optarg);
+			if (device_index < 0) {
+				fprintf(stderr, "Unsupported device index\n");
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
+		case 'D':
+			dump_frame = 1;
+#ifndef DEBUG
+			fprintf(stderr, "Warning: the -D option is only active in DEBUG mode.\n");
+#endif
+			break;
 		case 'f':
 			input_format_string = strdup(optarg);
 			break;
@@ -601,13 +734,14 @@ int main(int argc, char *argv[])
 			input_path = strdup(optarg);
 			break;
 		case 'o':
+#ifdef HAVE_STRTOK_R
 			/*
 			 * parse suboptions, the expected format is something
 			 * like:
 			 *   draw_mouse=1,framerate=100,video_size=800x480
 			 */
 			subopts = subopts_saved = strdup(optarg);
-			while((subopt = strtok_r(subopts, ",", &subopts))) {
+			while ((subopt = strtok_r(subopts, ",", &subopts))) {
 				char *subopt_name = strtok_r(subopt, "=", &subopt);
 				char *subopt_value = strtok_r(NULL, "", &subopt);
 				if (subopt_value == NULL) {
@@ -617,10 +751,13 @@ int main(int argc, char *argv[])
 				av_dict_set(&options, subopt_name, subopt_value, 0);
 			}
 			free(subopts_saved);
+#else
+			fprintf(stderr, "Option '-o' not implemented\n");
+#endif
 			break;
 		case 's':
 			rescale_method = atoi(optarg);
-			switch(format) {
+			switch(rescale_method) {
 			case SWS_FAST_BILINEAR:
 			case SWS_BILINEAR:
 			case SWS_BICUBIC:
@@ -636,7 +773,7 @@ int main(int argc, char *argv[])
 			default:
 				fprintf(stderr, "Unsupported rescale method\n");
 				ret = -EINVAL;
-				goto out;;
+				goto out;
 			}
 			break;
 		case 'u':
@@ -654,7 +791,7 @@ int main(int argc, char *argv[])
 			default:
 				fprintf(stderr, "Unsupported format\n");
 				ret = -EINVAL;
-				goto out;;
+				goto out;
 			}
 			break;
 		case 'q':
@@ -662,13 +799,15 @@ int main(int argc, char *argv[])
 			if (quality < 1 || quality > 100) {
 				fprintf(stderr, "Invalid quality value, must be between 1 and 100\n");
 				ret = -EINVAL;
-				goto out;;
+				goto out;
 			}
 			break;
-		case 'h':
-			usage(argv[0]);
-			ret = 0;
-			goto out;
+		case 'l':
+			log_level = atoi(optarg);
+			if (log_level < AM7XXX_LOG_FATAL || log_level > AM7XXX_LOG_TRACE) {
+				fprintf(stderr, "Unsupported log level, falling back to AM7XXX_LOG_ERROR\n");
+				log_level = AM7XXX_LOG_ERROR;
+			}
 			break;
 		case 'p':
 			power_mode = atoi(optarg);
@@ -678,14 +817,36 @@ int main(int argc, char *argv[])
 			case AM7XXX_POWER_MIDDLE:
 			case AM7XXX_POWER_HIGH:
 			case AM7XXX_POWER_TURBO:
-				fprintf(stdout, "Power mode: %x\n", power_mode);
+				fprintf(stdout, "Power mode: %d\n", power_mode);
+				break;
+			default:
+				fprintf(stderr, "Invalid power mode value, must be between %d and %d\n",
+					AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
+		case 'z':
+			zoom = atoi(optarg);
+			switch(zoom) {
+			case AM7XXX_ZOOM_ORIGINAL:
+			case AM7XXX_ZOOM_H:
+			case AM7XXX_ZOOM_H_V:
+			case AM7XXX_ZOOM_TEST:
+			case AM7XXX_ZOOM_TELE:
+				fprintf(stdout, "Zoom: %d\n", zoom);
 				break;
 			default:
-				fprintf(stderr, "Invalid power mode value, must be between %x and %x\n", AM7XXX_POWER_OFF, AM7XXX_POWER_TURBO);
+				fprintf(stderr, "Invalid zoom mode value, must be between %d and %d\n",
+					AM7XXX_ZOOM_ORIGINAL, AM7XXX_ZOOM_TELE);
 				ret = -EINVAL;
 				goto out;
 			}
 			break;
+		case 'h':
+			usage(argv[0]);
+			ret = 0;
+			goto out;
 		default: /* '?' */
 			usage(argv[0]);
 			ret = -EINVAL;
@@ -694,7 +855,8 @@ int main(int argc, char *argv[])
 	}
 
 	if (input_path == NULL) {
-		fprintf(stderr, "The -i option must always be passed\n");
+		fprintf(stderr, "The -i option must always be passed\n\n");
+		usage(argv[0]);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -732,18 +894,30 @@ int main(int argc, char *argv[])
 		goto out;
 	}
 
-	ret = am7xxx_open_device(ctx, &dev, 0);
+	am7xxx_set_log_level(ctx, log_level);
+
+	ret = am7xxx_open_device(ctx, &dev, device_index);
 	if (ret < 0) {
 		perror("am7xxx_open_device");
 		goto cleanup;
 	}
 
+	ret = am7xxx_set_zoom_mode(dev, zoom);
+	if (ret < 0) {
+		perror("am7xxx_set_zoom_mode");
+		goto cleanup;
+	}
+
 	ret = am7xxx_set_power_mode(dev, power_mode);
 	if (ret < 0) {
 		perror("am7xxx_set_power_mode");
 		goto cleanup;
 	}
 
+	/* When setting AM7XXX_ZOOM_TEST don't display the actual image */
+	if (zoom == AM7XXX_ZOOM_TEST)
+		goto cleanup;
+
 	ret = am7xxx_play(input_format_string,
 			  &options,
 			  input_path,
@@ -751,7 +925,8 @@ int main(int argc, char *argv[])
 			  upscale,
 			  quality,
 			  format,
-			  dev);
+			  dev,
+			  dump_frame);
 	if (ret < 0) {
 		fprintf(stderr, "am7xxx_play failed\n");
 		goto cleanup;
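
Note on the decode()/encode() wrappers added in this diff: they adapt the send/receive avcodec API introduced in FFmpeg 3.1 (avcodec_send_packet()/avcodec_receive_frame() and avcodec_send_frame()/avcodec_receive_packet()) back to the older got_frame/got_packet calling convention, so that the main loop of am7xxx_play() needs only minimal changes. The sketch below is not part of am7xxx-play.c; it only illustrates how such a decode() wrapper is typically driven from a packet-reading loop, assuming a format context, decoder context and video stream index have already been set up as in video_input_init(). The helper name decode_all() is hypothetical.

/*
 * Illustrative sketch only, not from am7xxx-play.c: drive the decode()
 * wrapper defined in the diff above from a packet-reading loop.
 * fmt_ctx, dec_ctx and stream_index are assumed to have been initialized
 * as in video_input_init(); decode_all() is a hypothetical helper name.
 */
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static int decode_all(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx, int stream_index)
{
	AVPacket pkt;
	AVFrame *frame;
	int got_frame;
	int ret = 0;

	frame = av_frame_alloc();
	if (frame == NULL)
		return AVERROR(ENOMEM);

	while (av_read_frame(fmt_ctx, &pkt) >= 0) {
		if (pkt.stream_index == stream_index) {
			/* decode() sends the packet and receives at most one frame */
			ret = decode(dec_ctx, frame, &got_frame, &pkt);
			if (got_frame) {
				/* a complete frame is available in frame->data/linesize */
			}
		}
		/* the packet must be unreferenced whether or not it was used */
		av_packet_unref(&pkt);
		if (ret < 0)
			break;
	}

	av_frame_free(&frame);
	return ret;
}

As in the diff above, each input packet yields at most one avcodec_receive_frame() call here; a flushing pass (sending a NULL packet and draining the decoder) would be needed to retrieve any frames still buffered at end of stream.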