diff --git a/configure b/configure
index a1110bde52..7bcc5bfdc2 100755
--- a/configure
+++ b/configure
@@ -1077,8 +1077,6 @@ HAVE_LIST="
     isatty
     kbhit
     ldbrx
-    libdc1394_1
-    libdc1394_2
     llrint
     llrintf
     local_aligned_16
@@ -2916,6 +2914,7 @@ check_mathfunc truncf
 enabled avisynth   && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32
 enabled libcelt    && require libcelt celt/celt.h celt_decode -lcelt0
 enabled frei0r     && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
+enabled libdc1394  && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
 enabled libdirac   && require_pkg_config dirac \
     "libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
     "dirac_decoder_init dirac_encoder_init"
@@ -2947,15 +2946,6 @@ enabled libxavs && require libxavs xavs.h xavs_encoder_encode -lxavs
 enabled libxvid    && require libxvid xvid.h xvid_global -lxvidcore
 enabled mlib       && require mediaLib mlib_types.h mlib_VectorSub_S16_U8_Mod -lmlib
 
-# libdc1394 check
-if enabled libdc1394; then
-    { check_lib dc1394/dc1394.h dc1394_new -ldc1394 -lraw1394 &&
-        enable libdc1394_2; } ||
-    { check_lib libdc1394/dc1394_control.h dc1394_create_handle -ldc1394_control -lraw1394 &&
-        enable libdc1394_1; } ||
-    die "ERROR: No version of libdc1394 found "
-fi
-
 SDL_CONFIG="${cross_prefix}sdl-config"
 if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
     check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index fa05d03758..768bbde494 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -666,7 +666,8 @@ OBJS-$(!CONFIG_SMALL) += inverse.o
 
 -include $(SUBDIR)$(ARCH)/Makefile
 
-SKIPHEADERS = %_tablegen.h
+SKIPHEADERS += %_tablegen.h aac_tablegen_decl.h \
+               fft-internal.h $(ARCH)/vp56_arith.h
 SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
 SKIPHEADERS-$(CONFIG_LIBDIRAC) += libdirac.h
 SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
diff --git a/libavcodec/arm/mathops.h b/libavcodec/arm/mathops.h
index 26404772bc..db427b59b4 100644
--- a/libavcodec/arm/mathops.h
+++ b/libavcodec/arm/mathops.h
@@ -28,18 +28,6 @@
 
 #if HAVE_INLINE_ASM
 
-#   define MULL MULL
-static inline av_const int MULL(int a, int b, unsigned shift)
-{
-    int lo, hi;
-    __asm__("smull %0, %1, %2, %3 \n\t"
-            "mov %0, %0, lsr %4 \n\t"
-            "add %1, %0, %1, lsl %5 \n\t"
-            : "=&r"(lo), "=&r"(hi)
-            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
-    return hi;
-}
-
 #define MULH MULH
 #define MUL64 MUL64
 
diff --git a/libavcodec/mathops.h b/libavcodec/mathops.h
index 29b3cd0acb..0477cfdce3 100644
--- a/libavcodec/mathops.h
+++ b/libavcodec/mathops.h
@@ -23,6 +23,7 @@
 #define AVCODEC_MATHOPS_H
 
 #include "libavutil/common.h"
+#include "config.h"
 
 #if ARCH_ARM
 #   include "arm/mathops.h"
@@ -40,13 +41,17 @@
 
 /* generic implementation */
 
+#ifndef MUL64
+#   define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
+#endif
+
 #ifndef MULL
-#   define MULL(a,b,s) (((int64_t)(a) * (int64_t)(b)) >> (s))
+#   define MULL(a,b,s) (MUL64(a, b) >> (s))
 #endif
 
 #ifndef MULH
 static av_always_inline int MULH(int a, int b){
-    return ((int64_t)(a) * (int64_t)(b))>>32;
+    return MUL64(a, b) >> 32;
 }
 #endif
 
@@ -56,10 +61,6 @@ static av_always_inline unsigned UMULH(unsigned a, unsigned b){
 }
 #endif
 
-#ifndef MUL64
-#   define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
-#endif
-
 #ifndef MAC64
 #   define MAC64(d, a, b) ((d) += MUL64(a, b))
 #endif
diff --git a/libavcodec/mpc.h b/libavcodec/mpc.h
index 365580ebd0..cd5769234c 100644
--- a/libavcodec/mpc.h
+++ b/libavcodec/mpc.h
@@ -34,8 +34,7 @@
 #include "get_bits.h"
 #include "dsputil.h"
 #include "mpegaudio.h"
-
-#include "mpcdata.h"
+#include "mpegaudiodsp.h"
 
 #define BANDS 32
 #define SAMPLES_PER_BAND 36
diff --git a/libavcodec/mpc8.c b/libavcodec/mpc8.c
index 51c5509425..2864b1a010 100644
--- a/libavcodec/mpc8.c
+++ b/libavcodec/mpc8.c
@@ -33,7 +33,6 @@
 #include "libavutil/audioconvert.h"
 #include "mpc.h"
 
-#include "mpcdata.h"
 #include "mpc8data.h"
 #include "mpc8huff.h"
 
diff --git a/libavcodec/mpcdata.h b/libavcodec/mpcdata.h
index a2212ecb25..03df3da3b5 100644
--- a/libavcodec/mpcdata.h
+++ b/libavcodec/mpcdata.h
@@ -22,8 +22,6 @@
 #ifndef AVCODEC_MPCDATA_H
 #define AVCODEC_MPCDATA_H
 
-#include
-
 static const float mpc_CC[18] = {
     65536.0000, 21845.3333, 13107.2000, 9362.2857, 7281.7778, 4369.0667, 2114.0645, 1040.2539,
     516.0315, 257.0039, 128.2505, 64.0626, 32.0156, 16.0039, 8.0010,
diff --git a/libavcodec/mpegaudiodectab.h b/libavcodec/mpegaudiodectab.h
index 4dd8a7cfc9..accd12b8e2 100644
--- a/libavcodec/mpegaudiodectab.h
+++ b/libavcodec/mpegaudiodectab.h
@@ -27,7 +27,9 @@
 #ifndef AVCODEC_MPEGAUDIODECTAB_H
 #define AVCODEC_MPEGAUDIODECTAB_H
 
+#include
 #include
+
 #include "mpegaudio.h"
 
 /*******************************************************/
diff --git a/libavcodec/put_bits.h b/libavcodec/put_bits.h
index d301d0afcc..79016912d5 100644
--- a/libavcodec/put_bits.h
+++ b/libavcodec/put_bits.h
@@ -34,6 +34,7 @@
 #include "libavutil/intreadwrite.h"
 #include "libavutil/log.h"
 #include "mathops.h"
+#include "config.h"
 
 //#define ALT_BITSTREAM_WRITER
 //#define ALIGNED_BITSTREAM_WRITER
diff --git a/libavcodec/tableprint.h b/libavcodec/tableprint.h
index d81b9a387b..cf7c1914e0 100644
--- a/libavcodec/tableprint.h
+++ b/libavcodec/tableprint.h
@@ -26,8 +26,6 @@
 #include
 #include
 
-#include "libavutil/common.h"
-
 #define WRITE_1D_FUNC_ARGV(type, linebrk, fmtstr, ...)\
 void write_##type##_array(const type *data, int len)\
 {\
diff --git a/libavdevice/bktr.c b/libavdevice/bktr.c
index a8ca7ecfb7..f39a3c9957 100644
--- a/libavdevice/bktr.c
+++ b/libavdevice/bktr.c
@@ -251,11 +251,6 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     AVRational fps;
     int ret = 0;
 
-    if (ap->time_base.den <= 0) {
-        ret = AVERROR(EINVAL);
-        goto out;
-    }
-
 #if FF_API_FORMAT_PARAMETERS
     if (ap->standard) {
         if (!strcasecmp(ap->standard, "pal"))
diff --git a/libavdevice/fbdev.c b/libavdevice/fbdev.c
index 0d41b5d6ba..dadcd70015 100644
--- a/libavdevice/fbdev.c
+++ b/libavdevice/fbdev.c
@@ -169,7 +169,7 @@ av_cold static int fbdev_read_header(AVFormatContext *avctx,
     st->codec->width = fbdev->width;
     st->codec->height = fbdev->heigth;
     st->codec->pix_fmt = pix_fmt;
-    st->codec->time_base = ap->time_base;
+    st->codec->time_base = (AVRational){fbdev->fps.den, fbdev->fps.num};
     st->codec->bit_rate =
         fbdev->width * fbdev->heigth * fbdev->bytes_per_pixel * av_q2d(fbdev->fps) * 8;
 
diff --git a/libavdevice/libdc1394.c b/libavdevice/libdc1394.c
index aba02269a5..f77a25748a 100644
--- a/libavdevice/libdc1394.c
+++ b/libavdevice/libdc1394.c
@@ -2,6 +2,7 @@
  * IIDC1394 grab interface (uses libdc1394 and libraw1394)
  * Copyright (c) 2004 Roman Shaposhnik
  * Copyright (c) 2008 Alessandro Sappia
+ * Copyright (c) 2011 Martin Lambers
  *
  * This file is part of FFmpeg.
 *
@@ -21,44 +22,24 @@ */
 
 #include "config.h"
-#include "libavformat/avformat.h"
 #include "libavutil/log.h"
 #include "libavutil/opt.h"
+#include "avdevice.h"
+
+#include
+#include
 #include "libavutil/parseutils.h"
 #include "libavutil/pixdesc.h"
 
-#if HAVE_LIBDC1394_2
 #include <dc1394/dc1394.h>
-#elif HAVE_LIBDC1394_1
-#include <libraw1394/raw1394.h>
-#include <libdc1394/dc1394_control.h>
-
-#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
-#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
-#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
-#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
-#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
-#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
-#define DC1394_FRAMERATE_15 FRAMERATE_15
-#define DC1394_FRAMERATE_30 FRAMERATE_30
-#define DC1394_FRAMERATE_60 FRAMERATE_60
-#define DC1394_FRAMERATE_120 FRAMERATE_120
-#define DC1394_FRAMERATE_240 FRAMERATE_240
-#endif
 
 #undef free
 
 typedef struct dc1394_data {
     AVClass *class;
-#if HAVE_LIBDC1394_1
-    raw1394handle_t handle;
-    dc1394_cameracapture camera;
-    int channel;
-#elif HAVE_LIBDC1394_2
     dc1394_t *d;
     dc1394camera_t *camera;
     dc1394video_frame_t *frame;
-#endif
     int current_frame;
     int frame_rate;        /**< frames per 1000 seconds (fps * 1000) */
     char *video_size;      /**< String describing video size, set by a private option. */
@@ -68,16 +49,21 @@ typedef struct dc1394_data {
     AVPacket packet;
 } dc1394_data;
 
-struct dc1394_frame_format {
-    int width;
-    int height;
-    enum PixelFormat pix_fmt;
-    int frame_size_id;
-} dc1394_frame_formats[] = {
-    { 320, 240, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 },
-    { 640, 480, PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
-    { 640, 480, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 },
-    { 0, 0, 0, 0 } /* gotta be the last one */
+/* The list of color codings that we support.
+ * We assume big endian for the dc1394 16bit modes: libdc1394 never sets the
+ * flag little_endian in dc1394video_frame_t. */
+struct dc1394_color_coding {
+    int pix_fmt;
+    int score;
+    uint32_t coding;
+} dc1394_color_codings[] = {
+    { PIX_FMT_GRAY16BE,  1000, DC1394_COLOR_CODING_MONO16 },
+    { PIX_FMT_RGB48BE,   1100, DC1394_COLOR_CODING_RGB16 },
+    { PIX_FMT_GRAY8,     1200, DC1394_COLOR_CODING_MONO8 },
+    { PIX_FMT_RGB24,     1300, DC1394_COLOR_CODING_RGB8 },
+    { PIX_FMT_UYYVYY411, 1400, DC1394_COLOR_CODING_YUV411 },
+    { PIX_FMT_UYVY422,   1500, DC1394_COLOR_CODING_YUV422 },
+    { PIX_FMT_NONE, 0, 0 } /* gotta be the last one */
 };
 
 struct dc1394_frame_rate {
@@ -98,9 +84,6 @@ struct dc1394_frame_rate {
 #define OFFSET(x) offsetof(dc1394_data, x)
 #define DEC AV_OPT_FLAG_DECODING_PARAM
 static const AVOption options[] = {
-#if HAVE_LIBDC1394_1
-    { "channel", "", offsetof(dc1394_data, channel), FF_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
-#endif
     { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
     { "pixel_format", "", OFFSET(pixel_format), FF_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC },
     { "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
@@ -114,203 +97,23 @@ static const AVClass libdc1394_class = {
     .version = LIBAVUTIL_VERSION_INT,
 };
 
-
-static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap,
-                                     struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
-{
-    dc1394_data* dc1394 = c->priv_data;
-    AVStream* vst;
-    struct dc1394_frame_format *fmt;
-    struct dc1394_frame_rate *fps;
-    enum PixelFormat pix_fmt;
-    int width, height;
-    AVRational framerate;
-    int ret = 0;
-
-    if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) {
-        av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
-        ret = AVERROR(EINVAL);
-        goto out;
-    }
-
-    if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) {
-        av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n");
-        goto out;
-    }
-    if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) {
-        av_log(c, AV_LOG_ERROR, "Couldn't parse framerate.\n");
-        goto out;
-    }
-#if FF_API_FORMAT_PARAMETERS
-    if (ap->width > 0)
-        width = ap->width;
-    if (ap->height > 0)
-        height = ap->height;
-    if (ap->pix_fmt)
-        pix_fmt = ap->pix_fmt;
-    if (ap->time_base.num)
-        framerate = (AVRational){ap->time_base.den, ap->time_base.num};
-#endif
-    dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den);
-
-    for (fmt = dc1394_frame_formats; fmt->width; fmt++)
-        if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
-            break;
-
-    for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
-        if (fps->frame_rate == dc1394->frame_rate)
-            break;
-
-    if (!fps->frame_rate || !fmt->width) {
-        av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
-               width, height, dc1394->frame_rate);
-        ret = AVERROR(EINVAL);
-        goto out;
-    }
-
-    /* create a video stream */
-    vst = av_new_stream(c, 0);
-    if (!vst) {
-        ret = AVERROR(ENOMEM);
-        goto out;
-    }
-    av_set_pts_info(vst, 64, 1, 1000);
-    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
-    vst->codec->codec_id = CODEC_ID_RAWVIDEO;
-    vst->codec->time_base.den = framerate.num;
-    vst->codec->time_base.num = framerate.den;
-    vst->codec->width = fmt->width;
-    vst->codec->height = fmt->height;
-    vst->codec->pix_fmt = fmt->pix_fmt;
-
-    /* packet init */
-    av_init_packet(&dc1394->packet);
-    dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
-    dc1394->packet.stream_index = vst->index;
-    dc1394->packet.flags |= AV_PKT_FLAG_KEY;
-
-    dc1394->current_frame = 0;
-
-    vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
-    *select_fps = fps;
-    *select_fmt = fmt;
-out:
-    av_freep(&dc1394->video_size);
-    av_freep(&dc1394->pixel_format);
-    av_freep(&dc1394->framerate);
-    return ret;
-}
-
-#if HAVE_LIBDC1394_1
-static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap)
-{
-    dc1394_data* dc1394 = c->priv_data;
-    AVStream* vst;
-    nodeid_t* camera_nodes;
-    int res;
-    struct dc1394_frame_format *fmt = NULL;
-    struct dc1394_frame_rate *fps = NULL;
-
-    if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
-        return -1;
-
-#if FF_API_FORMAT_PARAMETERS
-    if (ap->channel)
-        dc1394->channel = ap->channel;
-#endif
-
-    /* Now let us prep the hardware. */
-    dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
-    if (!dc1394->handle) {
-        av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
-        goto out;
-    }
-    camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
-    if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) {
-        av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel);
-        goto out_handle;
-    }
-    res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel],
-                                   0,
-                                   FORMAT_VGA_NONCOMPRESSED,
-                                   fmt->frame_size_id,
-                                   SPEED_400,
-                                   fps->frame_rate_id, 8, 1,
-                                   c->filename,
-                                   &dc1394->camera);
-    dc1394_free_camera_nodes(camera_nodes);
-    if (res != DC1394_SUCCESS) {
-        av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
-        goto out_handle;
-    }
-
-    res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
-    if (res != DC1394_SUCCESS) {
-        av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
-        goto out_handle_dma;
-    }
-
-    return 0;
-
-out_handle_dma:
-    dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
-    dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
-out_handle:
-    dc1394_destroy_handle(dc1394->handle);
-out:
-    return -1;
-}
-
-static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt)
-{
-    struct dc1394_data *dc1394 = c->priv_data;
-    int res;
-
-    /* discard stale frame */
-    if (dc1394->current_frame++) {
-        if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
-            av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
-    }
-
-    res = dc1394_dma_single_capture(&dc1394->camera);
-
-    if (res == DC1394_SUCCESS) {
-        dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
-        dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->frame_rate;
-        res = dc1394->packet.size;
-    } else {
-        av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
-        dc1394->packet.data = NULL;
-        res = -1;
-    }
-
-    *pkt = dc1394->packet;
-    return res;
-}
-
-static int dc1394_v1_close(AVFormatContext * context)
-{
-    struct dc1394_data *dc1394 = context->priv_data;
-
-    dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
-    dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
-    dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
-    dc1394_destroy_handle(dc1394->handle);
-
-    return 0;
-}
-
-#elif HAVE_LIBDC1394_2
-static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
+static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
 {
     dc1394_data* dc1394 = c->priv_data;
+    AVStream *vst;
+    const struct dc1394_color_coding *cc;
+    const struct dc1394_frame_rate *fr;
     dc1394camera_list_t *list;
-    int res, i;
-    struct dc1394_frame_format *fmt = NULL;
-    struct dc1394_frame_rate *fps = NULL;
-
-    if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
-        return -1;
+    dc1394video_modes_t video_modes;
+    dc1394video_mode_t video_mode;
+    dc1394framerates_t frame_rates;
+    dc1394framerate_t frame_rate;
+    uint32_t dc1394_width, dc1394_height, dc1394_color_coding;
+    int rate, best_rate;
+    int score, max_score;
+    int final_width, final_height, final_pix_fmt, final_frame_rate;
+    int res, i, j;
+    int ret=-1;
 
     /* Now let us prep the hardware. */
     dc1394->d = dc1394_new();
@@ -329,6 +132,149 @@ static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
     /* Freeing list of cameras */
     dc1394_camera_free_list (list);
 
+    /* Get the list of video modes supported by the camera. */
+    res = dc1394_video_get_supported_modes (dc1394->camera, &video_modes);
+    if (res != DC1394_SUCCESS) {
+        av_log(c, AV_LOG_ERROR, "Could not get video formats.\n");
+        goto out_camera;
+    }
+
+    if (dc1394->pixel_format) {
+        if ((ap->pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) {
+            av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
+            ret = AVERROR(EINVAL);
+            goto out;
+        }
+    }
+
+    if (dc1394->video_size) {
+        if ((ret = av_parse_video_size(&ap->width, &ap->height, dc1394->video_size)) < 0) {
+            av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n");
+            goto out;
+        }
+    }
+
+    /* Choose the best mode. */
+    rate = (ap->time_base.num ? av_rescale(1000, ap->time_base.den, ap->time_base.num) : -1);
+    max_score = -1;
+    for (i = 0; i < video_modes.num; i++) {
+        if (video_modes.modes[i] == DC1394_VIDEO_MODE_EXIF
+                || (video_modes.modes[i] >= DC1394_VIDEO_MODE_FORMAT7_MIN
+                    && video_modes.modes[i] <= DC1394_VIDEO_MODE_FORMAT7_MAX)) {
+            /* These modes are currently not supported as they would require
+             * much more work. For the remaining modes, the functions
+             * dc1394_get_image_size_from_video_mode and
+             * dc1394_get_color_coding_from_video_mode do not need to query the
+             * camera, and thus cannot fail. */
+            continue;
+        }
+        dc1394_get_color_coding_from_video_mode (NULL, video_modes.modes[i],
+                &dc1394_color_coding);
+        for (cc = dc1394_color_codings; cc->pix_fmt != PIX_FMT_NONE; cc++)
+            if (cc->coding == dc1394_color_coding)
+                break;
+        if (cc->pix_fmt == PIX_FMT_NONE) {
+            /* We currently cannot handle this color coding. */
+            continue;
+        }
+        /* Here we know that the mode is supported. Get its frame size and the list
+         * of frame rates supported by the camera for this mode. This list is sorted
+         * in ascending order according to libdc1394 example programs. */
+        dc1394_get_image_size_from_video_mode (NULL, video_modes.modes[i],
+                &dc1394_width, &dc1394_height);
+        res = dc1394_video_get_supported_framerates (dc1394->camera, video_modes.modes[i],
+                &frame_rates);
+        if (res != DC1394_SUCCESS || frame_rates.num == 0) {
+            av_log(c, AV_LOG_ERROR, "Cannot get frame rates for video mode.\n");
+            goto out_camera;
+        }
+        /* Choose the best frame rate. */
+        best_rate = -1;
+        for (j = 0; j < frame_rates.num; j++) {
+            for (fr = dc1394_frame_rates; fr->frame_rate; fr++) {
+                if (fr->frame_rate_id == frame_rates.framerates[j]) {
+                    break;
+                }
+            }
+            if (!fr->frame_rate) {
+                /* This frame rate is not supported. */
+                continue;
+            }
+            best_rate = fr->frame_rate;
+            frame_rate = fr->frame_rate_id;
+            if (ap->time_base.num && rate == fr->frame_rate) {
+                /* This is the requested frame rate. */
+                break;
+            }
+        }
+        if (best_rate == -1) {
+            /* No supported rate found. */
+            continue;
+        }
+        /* Here we know that both the mode and the rate are supported. Compute score. */
+        if (ap->width && ap->height
+                && (dc1394_width == ap->width && dc1394_height == ap->height)) {
+            score = 110000;
+        } else {
+            score = dc1394_width * 10;  // 1600 - 16000
+        }
+        if (ap->pix_fmt == cc->pix_fmt) {
+            score += 90000;
+        } else {
+            score += cc->score;         // 1000 - 1500
+        }
+        if (ap->time_base.num && rate == best_rate) {
+            score += 70000;
+        } else {
+            score += best_rate / 1000;  // 1 - 240
+        }
+        if (score > max_score) {
+            video_mode = video_modes.modes[i];
+            final_width = dc1394_width;
+            final_height = dc1394_height;
+            final_pix_fmt = cc->pix_fmt;
+            final_frame_rate = best_rate;
+            max_score = score;
+        }
+    }
+    if (max_score == -1) {
+        av_log(c, AV_LOG_ERROR, "No suitable video mode / frame rate available.\n");
+        goto out_camera;
+    }
+    if (ap->width && ap->height && !(ap->width == final_width && ap->height == final_height)) {
+        av_log(c, AV_LOG_WARNING, "Requested frame size is not available, using fallback.\n");
+    }
+    if (ap->pix_fmt != PIX_FMT_NONE && ap->pix_fmt != final_pix_fmt) {
+        av_log(c, AV_LOG_WARNING, "Requested pixel format is not supported, using fallback.\n");
+    }
+    if (ap->time_base.num && rate != final_frame_rate) {
+        av_log(c, AV_LOG_WARNING, "Requested frame rate is not available, using fallback.\n");
+    }
+
+    /* create a video stream */
+    vst = av_new_stream(c, 0);
+    if (!vst)
+        goto out_camera;
+    av_set_pts_info(vst, 64, 1, 1000);
+    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+    vst->codec->codec_id = CODEC_ID_RAWVIDEO;
+    vst->codec->time_base.den = final_frame_rate;
+    vst->codec->time_base.num = 1000;
+    vst->codec->width = final_width;
+    vst->codec->height = final_height;
+    vst->codec->pix_fmt = final_pix_fmt;
+
+    /* packet init */
+    av_init_packet(&dc1394->packet);
+    dc1394->packet.size = avpicture_get_size(final_pix_fmt, final_width, final_height);
+    dc1394->packet.stream_index = vst->index;
+    dc1394->packet.flags |= AV_PKT_FLAG_KEY;
+
+    dc1394->current_frame = 0;
+    dc1394->frame_rate = final_frame_rate;
+
+    vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, final_frame_rate, 1000);
+
     /* Select MAX Speed possible from the cam */
     if (dc1394->camera->bmode_capable>0) {
        dc1394_video_set_operation_mode(dc1394->camera, DC1394_OPERATION_MODE_1394B);
@@ -345,13 +291,13 @@ static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
        goto out_camera;
     }
 
-    if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
+    if (dc1394_video_set_mode(dc1394->camera, video_mode) != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
        goto out_camera;
     }
 
-    if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) {
-        av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate);
+    if (dc1394_video_set_framerate(dc1394->camera, frame_rate) != DC1394_SUCCESS) {
+        av_log(c, AV_LOG_ERROR, "Could not set framerate %d.\n", final_frame_rate);
        goto out_camera;
     }
     if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) {
@@ -370,11 +316,13 @@ out_camera:
     dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
     dc1394_camera_free (dc1394->camera);
 out:
+    av_freep(&dc1394->video_size);
+    av_freep(&dc1394->pixel_format);
     dc1394_free(dc1394->d);
-    return -1;
+    return ret;
 }
 
-static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
+static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
 {
     struct dc1394_data *dc1394 = c->priv_data;
     int res;
@@ -400,7 +348,7 @@ static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
     return res;
 }
 
-static int dc1394_v2_close(AVFormatContext * context)
+static int dc1394_close(AVFormatContext * context)
 {
     struct dc1394_data *dc1394 = context->priv_data;
 
@@ -414,25 +362,11 @@ static int dc1394_v2_close(AVFormatContext * context)
 
 AVInputFormat ff_libdc1394_demuxer = {
     .name = "libdc1394",
-    .long_name = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
+    .long_name = NULL_IF_CONFIG_SMALL("dc1394 A/V grab"),
     .priv_data_size = sizeof(struct dc1394_data),
-    .read_header = dc1394_v2_read_header,
-    .read_packet = dc1394_v2_read_packet,
-    .read_close = dc1394_v2_close,
+    .read_header = dc1394_read_header,
+    .read_packet = dc1394_read_packet,
+    .read_close = dc1394_close,
     .flags = AVFMT_NOFILE,
     .priv_class = &libdc1394_class,
 };
-
-#endif
-#if HAVE_LIBDC1394_1
-AVInputFormat ff_libdc1394_demuxer = {
-    .name = "libdc1394",
-    .long_name = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
-    .priv_data_size = sizeof(struct dc1394_data),
-    .read_header = dc1394_v1_read_header,
-    .read_packet = dc1394_v1_read_packet,
-    .read_close = dc1394_v1_close,
-    .flags = AVFMT_NOFILE,
-    .priv_class = &libdc1394_class,
-};
-#endif
diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
index 08cf0096f6..d669f073b5 100644
--- a/libavdevice/v4l2.c
+++ b/libavdevice/v4l2.c
@@ -526,6 +526,8 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap)
             return AVERROR(errno);
         }
     }
+    s1->streams[0]->codec->time_base.den = tpf->denominator;
+    s1->streams[0]->codec->time_base.num = tpf->numerator;
 
     return 0;
 }
@@ -658,8 +660,6 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     st->codec->codec_id = codec_id;
     st->codec->width = s->width;
     st->codec->height = s->height;
-    st->codec->time_base.den = ap->time_base.den;
-    st->codec->time_base.num = ap->time_base.num;
     st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
 
 out:
diff --git a/libavdevice/x11grab.c b/libavdevice/x11grab.c
index 58a8ae5571..87ace1e57a 100644
--- a/libavdevice/x11grab.c
+++ b/libavdevice/x11grab.c
@@ -71,6 +71,7 @@ struct x11_grab
     int use_shm;             /**< !0 when using XShm extension */
     XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm infos */
     int nomouse;
+    char *framerate;         /**< Set by a private option. */
 };
 
 /**
@@ -97,6 +98,7 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     int use_shm;
     char *dpyname, *offset;
     int ret = 0;
+    AVRational framerate;
 
     dpyname = av_strdup(s1->filename);
     offset = strchr(dpyname, '+');
@@ -110,11 +112,17 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
         goto out;
     }
+    if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) {
+        av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate);
+        goto out;
+    }
 #if FF_API_FORMAT_PARAMETERS
     if (ap->width > 0)
         x11grab->width = ap->width;
     if (ap->height > 0)
         x11grab->height = ap->height;
+    if (ap->time_base.num)
+        framerate = (AVRational){ap->time_base.den, ap->time_base.num};
 #endif
     av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
            s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);
@@ -127,12 +135,6 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         goto out;
     }
 
-    if (ap->time_base.den <= 0) {
-        av_log(s1, AV_LOG_ERROR, "AVParameters don't have video size and/or rate. Use -s and -r.\n");
-        ret = AVERROR(EINVAL);
-        goto out;
-    }
-
     st = av_new_stream(s1, 0);
     if (!st) {
         ret = AVERROR(ENOMEM);
@@ -241,8 +243,8 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8;
     x11grab->dpy = dpy;
-    x11grab->time_base = ap->time_base;
-    x11grab->time_frame = av_gettime() / av_q2d(ap->time_base);
+    x11grab->time_base = (AVRational){framerate.den, framerate.num};
+    x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
     x11grab->x_off = x_off;
     x11grab->y_off = y_off;
     x11grab->image = image;
@@ -253,11 +255,12 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
     st->codec->width = x11grab->width;
     st->codec->height = x11grab->height;
     st->codec->pix_fmt = input_pixfmt;
-    st->codec->time_base = ap->time_base;
-    st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(ap->time_base) * 8;
+    st->codec->time_base = x11grab->time_base;
+    st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8;
 
 out:
     av_freep(&x11grab->video_size);
+    av_freep(&x11grab->framerate);
     return ret;
 }
 
@@ -465,6 +468,7 @@ x11grab_read_close(AVFormatContext *s1)
 #define DEC AV_OPT_FLAG_DECODING_PARAM
 static const AVOption options[] = {
     { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
+    { "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
     { NULL },
 };
 
diff --git a/libavformat/ingenientdec.c b/libavformat/ingenientdec.c
index eb1e6f6521..4a7ff09dbe 100644
--- a/libavformat/ingenientdec.c
+++ b/libavformat/ingenientdec.c
@@ -61,11 +61,12 @@ static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt)
 AVInputFormat ff_ingenient_demuxer = {
     "ingenient",
     NULL_IF_CONFIG_SMALL("raw Ingenient MJPEG"),
-    0,
+    sizeof(FFRawVideoDemuxerContext),
     NULL,
     ff_raw_video_read_header,
     ingenient_read_packet,
     .flags= AVFMT_GENERIC_INDEX,
     .extensions = "cgi", // FIXME
     .value = CODEC_ID_MJPEG,
+    .priv_class = &ff_rawvideo_demuxer_class,
 };
diff --git a/libavformat/network.h b/libavformat/network.h
index 1b14506fd3..f7e19b196e 100644
--- a/libavformat/network.h
+++ b/libavformat/network.h
@@ -21,7 +21,10 @@
 #ifndef AVFORMAT_NETWORK_H
 #define AVFORMAT_NETWORK_H
 
+#include
+
 #include "config.h"
+#include "libavutil/error.h"
 #include "os_support.h"
 
 #if HAVE_WINSOCK2_H
diff --git a/libavformat/rawdec.c b/libavformat/rawdec.c
index 43b83e98d8..c70ff49d51 100644
--- a/libavformat/rawdec.c
+++ b/libavformat/rawdec.c
@@ -72,11 +72,8 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
         FFRawVideoDemuxerContext *s1 = s->priv_data;
         int width = 0, height = 0, ret = 0;
         enum PixelFormat pix_fmt;
+        AVRational framerate;
 
-        if(ap->time_base.num)
-            av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
-        else
-            av_set_pts_info(st, 64, 1, 25);
         if (s1->video_size && (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) {
             av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
             goto fail;
@@ -86,6 +83,10 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
             ret = AVERROR(EINVAL);
             goto fail;
         }
+        if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) {
+            av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s1->framerate);
+            goto fail;
+        }
 #if FF_API_FORMAT_PARAMETERS
         if (ap->width > 0)
             width = ap->width;
@@ -93,13 +94,17 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
            height = ap->height;
        if (ap->pix_fmt)
            pix_fmt = ap->pix_fmt;
+        if (ap->time_base.num)
+            framerate = (AVRational){ap->time_base.den, ap->time_base.num};
 #endif
+        av_set_pts_info(st, 64, framerate.den, framerate.num);
         st->codec->width = width;
         st->codec->height = height;
         st->codec->pix_fmt = pix_fmt;
 fail:
         av_freep(&s1->video_size);
         av_freep(&s1->pixel_format);
+        av_freep(&s1->framerate);
         return ret;
         }
     default:
@@ -150,30 +155,36 @@ int ff_raw_video_read_header(AVFormatContext *s,
                              AVFormatParameters *ap)
 {
     AVStream *st;
+    FFRawVideoDemuxerContext *s1 = s->priv_data;
+    AVRational framerate;
+    int ret = 0;
+
     st = av_new_stream(s, 0);
-    if (!st)
-        return AVERROR(ENOMEM);
+    if (!st) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
 
     st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
     st->codec->codec_id = s->iformat->value;
     st->need_parsing = AVSTREAM_PARSE_FULL;
 
-    /* for MJPEG, specify frame rate */
-    /* for MPEG-4 specify it, too (most MPEG-4 streams do not have the fixed_vop_rate set ...)*/
-    if (ap->time_base.num) {
-        st->codec->time_base= ap->time_base;
-    } else if ( st->codec->codec_id == CODEC_ID_MJPEG ||
-                st->codec->codec_id == CODEC_ID_MPEG4 ||
-                st->codec->codec_id == CODEC_ID_DIRAC ||
-                st->codec->codec_id == CODEC_ID_DNXHD ||
-                st->codec->codec_id == CODEC_ID_VC1   ||
-                st->codec->codec_id == CODEC_ID_H264) {
-        st->codec->time_base= (AVRational){1,25};
+    if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) {
+        av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s1->framerate);
+        goto fail;
     }
+#if FF_API_FORMAT_PARAMETERS
+    if (ap->time_base.num)
+        framerate = (AVRational){ap->time_base.den, ap->time_base.num};
+#endif
+
+    st->codec->time_base = (AVRational){framerate.den, framerate.num};
     av_set_pts_info(st, 64, 1, 1200000);
 
-    return 0;
+fail:
+    av_freep(&s1->framerate);
+    return ret;
 }
 
 /* Note: Do not forget to add new entries to the Makefile as well. */
@@ -196,6 +207,7 @@ const AVClass ff_rawaudio_demuxer_class = {
 static const AVOption video_options[] = {
     { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
     { "pixel_format", "", OFFSET(pixel_format), FF_OPT_TYPE_STRING, {.str = "yuv420p"}, 0, 0, DEC },
+    { "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "25"}, 0, 0, DEC },
     { NULL },
 };
 #undef OFFSET
diff --git a/libavformat/rawdec.h b/libavformat/rawdec.h
index f5d40c063f..6cb5af2b0a 100644
--- a/libavformat/rawdec.h
+++ b/libavformat/rawdec.h
@@ -35,6 +35,7 @@ typedef struct FFRawVideoDemuxerContext {
     const AVClass *class;     /**< Class for private options. */
     char *video_size;         /**< String describing video size, set by a private option. */
     char *pixel_format;       /**< Set by a private option. */
+    char *framerate;          /**< String describing framerate, set by a private option. */
 } FFRawVideoDemuxerContext;
 
 extern const AVClass ff_rawaudio_demuxer_class;
@@ -58,6 +59,8 @@ AVInputFormat ff_ ## shortname ## _demuxer = {\
     .extensions = ext,\
     .flags = AVFMT_GENERIC_INDEX,\
     .value = id,\
+    .priv_data_size = sizeof(FFRawVideoDemuxerContext),\
+    .priv_class = &ff_rawvideo_demuxer_class,\
 };
 
 #endif /* AVFORMAT_RAWDEC_H */
diff --git a/libavformat/tty.c b/libavformat/tty.c
index 8340218bd2..bf426f64b6 100644
--- a/libavformat/tty.c
+++ b/libavformat/tty.c
@@ -73,21 +73,20 @@ static int read_header(AVFormatContext *avctx,
                        AVFormatParameters *ap)
 {
     TtyDemuxContext *s = avctx->priv_data;
-    int width = 0, height = 0, ret;
+    int width = 0, height = 0, ret = 0;
     AVStream *st = av_new_stream(avctx, 0);
-    if (!st)
-        return AVERROR(ENOMEM);
+
+    if (!st) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
     st->codec->codec_tag = 0;
     st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
     st->codec->codec_id = CODEC_ID_ANSI;
 
-    if (s->video_size) {
-        ret = av_parse_video_size(&width, &height, s->video_size);
-        av_freep(&s->video_size);
-        if (ret < 0) {
-            av_log (avctx, AV_LOG_ERROR, "Couldn't parse video size.\n");
-            return ret;
-        }
+    if (s->video_size && (ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
+        av_log (avctx, AV_LOG_ERROR, "Couldn't parse video size.\n");
+        goto fail;
     }
 #if FF_API_FORMAT_PARAMETERS
     if (ap->width > 0)
@@ -121,7 +120,9 @@ static int read_header(AVFormatContext *avctx,
         avio_seek(avctx->pb, 0, SEEK_SET);
     }
 
-    return 0;
+fail:
+    av_freep(&s->video_size);
+    return ret;
 }
 
 static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
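
Note (not part of the patch): the recurring change above is replacing AVFormatParameters' time_base with a per-demuxer "framerate" string option that is parsed with av_parse_video_rate() and then inverted into the stream/codec time base. The minimal standalone sketch below illustrates only that conversion, assuming nothing beyond public libavutil API (AVRational, av_parse_video_rate); it is an illustration, not code from the patch.

/* Sketch: turn a "framerate" option string into a time base, the way the
 * demuxers in this patch do.  Build against libavutil. */
#include <stdio.h>
#include "libavutil/parseutils.h"
#include "libavutil/rational.h"

int main(void)
{
    AVRational framerate;
    AVRational time_base;

    /* Abbreviations such as "ntsc" (30000/1001) or plain numbers like "25"
     * are accepted by av_parse_video_rate(). */
    if (av_parse_video_rate(&framerate, "ntsc") < 0) {
        fprintf(stderr, "Could not parse framerate.\n");
        return 1;
    }

    /* The time base is the inverse of the frame rate, hence {den, num},
     * matching e.g. st->codec->time_base = (AVRational){framerate.den, framerate.num}. */
    time_base = (AVRational){ framerate.den, framerate.num };
    printf("framerate %d/%d -> time_base %d/%d\n",
           framerate.num, framerate.den, time_base.num, time_base.den);
    return 0;
}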