mirror of https://github.com/mpv-player/mpv synced 2024-12-26 09:02:38 +00:00

Merge svn changes r31318 to r31328

r31328 is somewhat questionable (changing the option at that point
isn't quite safe), but it was a failure case already...
Uoti Urpala 2010-06-05 23:34:17 +03:00
commit dde8b753e4
11 changed files with 38 additions and 297 deletions

View File

@ -4356,7 +4356,9 @@ Transforms the video stream into a sequence of uncompressed YUV 4:2:0
images and stores it in a file (default: ./stream.yuv).
The format is the same as the one employed by mjpegtools, so this is
useful if you want to process the video with the mjpegtools suite.
It supports the YV12, RGB (24 bpp) and BGR (24 bpp) format.
It supports the YV12 format.
If your source file has a different format and is interlaced, make sure
to use -vf scale=::1 to ensure the conversion uses interlaced mode.
You can combine it with the \-fixed\-vo option to concatenate files
with the same dimensions and fps value.
.PD 0

View File

@ -1,18 +1,10 @@
.\" sync with en/mplayer.1 rev. 31173
.\" sync with en/mplayer.1 rev. 31292
.\" Encoding: UTF-8
.\" Reminder of hard terms which need better/final solution later:
.\" /capture; playtree in parent list; colorkey; retrace; desync; downmix;
.\" wrapper script ... shellescapes; keyframe; deblock; dering;
.\" encoding audio rate; alphamap; upsampling; sweep;
.\" aligned buffers; instancenumber;
.\"
.\" MPlayer (C) 2000-2010 MPlayer 团队
.\" 本(英文)手册页由 Gabucino, Diego Biurrun, Jonas Jermann 编写
.\" 视频滤镜、通用编解码器选项、特定编解码器专用选项三个章节由
.\" JRaSH <jrash06 AT 163.com>重新翻译
.\" 参于翻译本中文手册页的有:
.\" JRaSH <jrash06 AT 163.com>,
.\" Lu Ran <hephooey@fastmail.fm>, Sheldon Jin <jinsh2 AT yahoo.com>,
.
.\" --------------------------------------------------------------------------
.\" 宏定义
@ -1116,6 +1108,7 @@ MPlayer reads command lines separated by newlines (\\n) from standard input,
Select the dynamic range compression (DRC) level for AC-3 audio streams.
<level> is a floating point value ranging from 0 to 1, where 0 means no
compression and 1 (the default) means full compression (making loud passages
quieter and vice versa).
Values greater than 2 can also be used, but only for experimentation.
This option only has an effect if the AC-3 stream contains the required
range compression information.
.
.TP
@ -1687,6 +1680,10 @@ radio://1, radio://104.4, radio_set_channel 1
.PD 1
.
.TP
.B \-referrer <string> (network only)
Specify a referrer path or URL for HTTP requests.
.
.TP
.B \-rtsp\-port
与 "rtsp://" 的 URL 地址一起用来锁定客户端的端口号。
如果你在路由器后面, 想把 RTSP 流从服务器转到指定客户端, 此选项可能有用。

View File

@ -158,7 +158,7 @@ int _dvdcss_test( dvdcss_t dvdcss )
default: psz_type = "unknown status"; break;
}
print_debug( dvdcss, "drive region mask %x, %s, %s",
print_debug( dvdcss, "drive region mask 0x%x, %s, %s",
i_mask, psz_rpc, psz_type );
if( i_copyright && i_rpc == 1 && i_type == 0 )
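The only change in this hunk is the format string: the mask was already printed in hex, but printf's %x conversion emits bare hex digits, so the explicit "0x" prefix makes the log output unambiguous. A minimal standalone demo of the difference (hypothetical, not part of libdvdcss):

#include <stdio.h>

int main(void)
{
    unsigned i_mask = 0x1f;
    /* %x prints bare hex digits; the "0x" has to come from the format string */
    printf("drive region mask %x\n", i_mask);   /* -> drive region mask 1f   */
    printf("drive region mask 0x%x\n", i_mask); /* -> drive region mask 0x1f */
    return 0;
}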

View File

@ -367,12 +367,12 @@ LIBDVDCSS_EXPORT dvdcss_t dvdcss_open ( char *psz_target )
if( dvdcss->b_ioctls )
{
i_ret = _dvdcss_test( dvdcss );
if( i_ret == -2 )
if( i_ret == -3 )
{
/* Scrambled disk, RPC-II drive, no region set: bail out */
free( dvdcss->psz_device );
free( dvdcss );
return NULL;
print_debug( dvdcss, "scrambled disc on a region-free RPC-II "
"drive: possible failure, but continuing "
"anyway" );
}
else if( i_ret < 0 )
{

View File

@ -34,7 +34,7 @@
#include "vf.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/eval.h"
#include "libavutil/eval.h"
struct vf_priv_s {
AVExpr * e[3];
@ -116,7 +116,7 @@ static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
const_values[3]=y;
for(x=0; x<w; x++){
const_values[2]=x;
dst[x + y * dst_stride] = ff_eval_expr(vf->priv->e[plane],
dst[x + y * dst_stride] = av_eval_expr(vf->priv->e[plane],
const_values, vf);
}
}
@ -178,7 +178,7 @@ static int vf_open(vf_instance_t *vf, char *args){
plane==0 ? lum : (plane==1 ? cb : cr),
NULL
};
res = ff_parse_expr(&vf->priv->e[plane], eq[plane], const_names, NULL, NULL, func2_names, func2, 0, NULL);
res = av_parse_expr(&vf->priv->e[plane], eq[plane], const_names, NULL, NULL, func2_names, func2, 0, NULL);
if (res < 0) {
mp_msg(MSGT_VFILTER, MSGL_ERR, "geq: error loading equation `%s'\n", eq[plane]);
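These hunks move vf_geq.c from the private libavcodec expression evaluator (ff_parse_expr/ff_eval_expr in libavcodec/eval.h) to the public libavutil one (av_parse_expr/av_eval_expr in libavutil/eval.h). A commented restatement of the two new call sites above; the per-argument notes are an editorial reading of this code, not taken from the FFmpeg headers:

/* at vf_open(): compile each per-plane expression string once */
res = av_parse_expr(&vf->priv->e[plane], /* receives the compiled AVExpr      */
                    eq[plane],           /* expression text for this plane    */
                    const_names,         /* names of the available constants  */
                    NULL, NULL,          /* no one-argument helper functions  */
                    func2_names, func2,  /* two-argument helpers (lum/cb/cr)  */
                    0, NULL);            /* log offset and logging context    */

/* at put_image(): evaluate the compiled expression for every output pixel */
dst[x + y * dst_stride] = av_eval_expr(vf->priv->e[plane], const_values, vf);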

View File

@ -32,7 +32,7 @@
#include "libvo/fastmemcpy.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/eval.h"
#include "libavutil/eval.h"
struct vf_priv_s {
@ -69,7 +69,7 @@ static int config(struct vf_instance *vf,
double temp_val;
int res;
res= ff_parse_and_eval_expr(&temp_val, vf->priv->eq, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL);
res= av_parse_and_eval_expr(&temp_val, vf->priv->eq, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL);
if (res < 0){
mp_msg(MSGT_VFILTER, MSGL_ERR, "qp: Error evaluating \"%s\" \n", vf->priv->eq);
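vf_qp.c gets the same libavcodec-to-libavutil migration, but it only needs the expression's value once, at config() time, so it uses the one-shot parse-and-evaluate entry point. A commented restatement of the new call; the grouping of the trailing arguments is an assumption read off the call site, not copied from the FFmpeg headers:

res = av_parse_and_eval_expr(&temp_val,                 /* receives the numeric result     */
                             vf->priv->eq,              /* expression text from the option */
                             const_names, const_values, /* constant names and their values */
                             NULL, NULL, NULL, NULL,    /* no helper-function tables       */
                             NULL, 0, NULL);            /* opaque ptr, log params (assumed)*/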

View File

@ -80,11 +80,10 @@ static int mp_read(void *opaque, uint8_t *buf, int size) {
struct stream *stream = demuxer->stream;
int ret;
if(stream_eof(stream)) //needed?
return -1;
ret=stream_read(stream, buf, size);
mp_msg(MSGT_HEADER,MSGL_DBG2,"%d=mp_read(%p, %p, %d), eof:%d\n", ret, stream, buf, size, stream->eof);
mp_msg(MSGT_HEADER,MSGL_DBG2,"%d=mp_read(%p, %p, %d), pos: %"PRId64", eof:%d\n",
ret, stream, buf, size, stream_tell(stream), stream->eof);
return ret;
}
@ -106,8 +105,6 @@ static int64_t mp_seek(void *opaque, int64_t pos, int whence) {
if(pos<0)
return -1;
if(pos<stream->end_pos && stream->eof)
stream_reset(stream);
current_pos = stream_tell(stream);
if(stream_seek(stream, pos)==0) {
stream_reset(stream);

View File

@ -69,6 +69,7 @@ typedef enum
VIDEO_MPEG4 = 0x10000004,
VIDEO_H264 = 0x10000005,
VIDEO_AVC = mmioFOURCC('a', 'v', 'c', '1'),
VIDEO_DIRAC = mmioFOURCC('d', 'r', 'a', 'c'),
VIDEO_VC1 = mmioFOURCC('W', 'V', 'C', '1'),
AUDIO_MP2 = 0x50,
AUDIO_A52 = 0x2000,
@ -245,7 +246,7 @@ typedef struct {
#define IS_AUDIO(x) (((x) == AUDIO_MP2) || ((x) == AUDIO_A52) || ((x) == AUDIO_LPCM_BE) || ((x) == AUDIO_AAC) || ((x) == AUDIO_DTS) || ((x) == AUDIO_TRUEHD))
#define IS_VIDEO(x) (((x) == VIDEO_MPEG1) || ((x) == VIDEO_MPEG2) || ((x) == VIDEO_MPEG4) || ((x) == VIDEO_H264) || ((x) == VIDEO_AVC) || ((x) == VIDEO_VC1))
#define IS_VIDEO(x) (((x) == VIDEO_MPEG1) || ((x) == VIDEO_MPEG2) || ((x) == VIDEO_MPEG4) || ((x) == VIDEO_H264) || ((x) == VIDEO_AVC) || ((x) == VIDEO_DIRAC) || ((x) == VIDEO_VC1))
#define IS_SUB(x) (((x) == SPU_DVD) || ((x) == SPU_DVB) || ((x) == SPU_TELETEXT))
static int ts_parse(demuxer_t *demuxer, ES_stream_t *es, unsigned char *packet, int probe);
@ -2326,6 +2327,10 @@ static int parse_descriptors(struct pmt_es_t *es, uint8_t *ptr)
{
es->type = VIDEO_VC1;
}
else if(d[0] == 'd' && d[1] == 'r' && d[2] == 'a' && d[3] == 'c')
{
es->type = VIDEO_DIRAC;
}
else
es->type = UNKNOWN;
mp_msg(MSGT_DEMUX, MSGL_DBG2, "FORMAT %s\n", es->format_descriptor);
@ -2523,6 +2528,9 @@ static int parse_pmt(ts_priv_t * priv, uint16_t progid, uint16_t pid, int is_sta
case 0x86:
pmt->es[idx].type = AUDIO_DTS;
break;
case 0xD1:
pmt->es[idx].type = VIDEO_DIRAC;
break;
case 0xEA:
pmt->es[idx].type = VIDEO_VC1;
break;
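Dirac support is wired up in the three places visible above: a VIDEO_DIRAC stream type built with mmioFOURCC('d', 'r', 'a', 'c'), a match on the 'drac' registration descriptor in parse_descriptors(), the 0xD1 stream_type case in parse_pmt(), plus the extended IS_VIDEO() macro. A standalone illustration of what the fourcc macro packs, assuming the conventional little-endian definition (not copied from the project's headers):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Conventional mmioFOURCC definition, shown here for illustration only */
#define mmioFOURCC(a, b, c, d) \
    ((uint32_t)(uint8_t)(a) | ((uint32_t)(uint8_t)(b) << 8) | \
     ((uint32_t)(uint8_t)(c) << 16) | ((uint32_t)(uint8_t)(d) << 24))

int main(void)
{
    /* 'd'=0x64 lands in the low byte, 'c'=0x63 in the high byte */
    printf("VIDEO_DIRAC = 0x%08" PRIx32 "\n", mmioFOURCC('d', 'r', 'a', 'c'));
    /* prints: VIDEO_DIRAC = 0x63617264 */
    return 0;
}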

View File

@ -40,7 +40,7 @@ int geometry(int *xpos, int *ypos, int *widw, int *widh, int scrw, int scrh)
int width, height, xoff, yoff, xper, yper;
int i;
int ok = 0;
for (i = 0; !ok && i < 8; i++) {
for (i = 0; !ok && i < 9; i++) {
width = height = xoff = yoff = xper = yper = INT_MIN;
strcpy(xsign, "+");
strcpy(ysign, "+");
@ -72,6 +72,9 @@ int geometry(int *xpos, int *ypos, int *widw, int *widh, int scrw, int scrh)
case 7:
ok = sscanf(vo_geometry, "%i%1[%]%c", &xper, dummy, dummy) == 2;
break;
case 8:
ok = sscanf(vo_geometry, "%i%c", &xoff, dummy) == 1;
break;
}
}
if (!ok) {
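The new case 8 lets -geometry accept a bare integer (stored as the x offset, per the sscanf above), and the loop bound grows from 8 to 9 so the extra pattern is actually tried. The == 1 test works because sscanf() reports exactly one successful conversion only when nothing follows the number; any trailing character would also satisfy the %c and bump the count to 2. A standalone check, not taken from the mpv tree:

#include <stdio.h>

int main(void)
{
    int xoff;
    char dummy[2];
    /* "%i%c" returns 1 for a bare integer, 2 if anything trails it, 0 otherwise */
    printf("%d\n", sscanf("200",  "%i%c", &xoff, dummy)); /* 1: accepted      */
    printf("%d\n", sscanf("200x", "%i%c", &xoff, dummy)); /* 2: trailing char */
    printf("%d\n", sscanf("abc",  "%i%c", &xoff, dummy)); /* 0: not a number  */
    return 0;
}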

View File

@ -54,11 +54,6 @@
#include "sub.h"
#include "fastmemcpy.h"
#include "libswscale/swscale.h"
#ifdef CONFIG_LIBSWSCALE_INTERNALS
#include "libswscale/rgb2rgb.h"
#endif
#include "libmpcodecs/vf_scale.h"
#include "libavutil/rational.h"
static const vo_info_t info =
@ -80,9 +75,6 @@ static uint8_t *image_y = NULL;
static uint8_t *image_u = NULL;
static uint8_t *image_v = NULL;
static uint8_t *rgb_buffer = NULL;
static uint8_t *rgb_line_buffer = NULL;
static char *yuv_filename = NULL;
static int using_format = 0;
@ -127,18 +119,6 @@ static int config(uint32_t width, uint32_t height, uint32_t d_width,
"Interlaced mode requires image height to be divisible by 4.");
return -1;
}
rgb_line_buffer = malloc(image_width * 3);
if (!rgb_line_buffer)
{
mp_tmsg(MSGT_VO,MSGL_FATAL,
"Unable to allocate line buffer for interlaced mode.");
return -1;
}
if (using_format == IMGFMT_YV12)
mp_tmsg(MSGT_VO,MSGL_WARN,
"Input not RGB, can't separate chrominance by fields!");
}
if (width % 2)
@ -148,20 +128,6 @@ static int config(uint32_t width, uint32_t height, uint32_t d_width,
return -1;
}
#ifdef CONFIG_LIBSWSCALE_INTERNALS
if(using_format != IMGFMT_YV12)
{
sws_rgb2rgb_init(get_sws_cpuflags());
rgb_buffer = malloc(image_width * image_height * 3);
if (!rgb_buffer)
{
mp_tmsg(MSGT_VO,MSGL_FATAL,
"Not enough memory to allocate RGB framebuffer.");
return -1;
}
}
#endif
write_bytes = image_width * image_height * 3 / 2;
image = malloc(write_bytes);
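With the RGB paths removed, the frame buffer is sized for planar YUV 4:2:0 only: one full-resolution luma plane plus two quarter-size chroma planes, i.e. 3/2 bytes per pixel, which is what the write_bytes expression above computes. A quick worked check with illustrative (hypothetical) dimensions:

#include <stdio.h>

int main(void)
{
    int image_width = 720, image_height = 576;             /* example values only */
    int write_bytes = image_width * image_height * 3 / 2;
    printf("Y plane : %d bytes\n", image_width * image_height);     /* 414720 */
    printf("Cb + Cr : %d bytes\n", image_width * image_height / 2); /* 207360 */
    printf("frame   : %d bytes\n", write_bytes);                    /* 622080 */
    return 0;
}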
@ -186,44 +152,10 @@ static int config(uint32_t width, uint32_t height, uint32_t d_width,
return 0;
}
/* Only use when h divisable by 2! */
static void swap_fields(uint8_t *ptr, const int h, const int stride)
{
int i;
for (i=0; i<h; i +=2)
{
fast_memcpy(rgb_line_buffer , ptr + stride * i , stride);
fast_memcpy(ptr + stride * i , ptr + stride * (i+1), stride);
fast_memcpy(ptr + stride * (i+1), rgb_line_buffer , stride);
}
}
static void draw_alpha(int x0, int y0, int w, int h, unsigned char *src,
unsigned char *srca, int stride) {
switch (using_format)
{
case IMGFMT_YV12:
vo_draw_alpha_yv12(w, h, src, srca, stride,
image + y0 * image_width + x0, image_width);
break;
case IMGFMT_BGR|24:
case IMGFMT_RGB|24:
if (config_interlace != Y4M_ILACE_BOTTOM_FIRST)
vo_draw_alpha_rgb24(w, h, src, srca, stride,
rgb_buffer + (y0 * image_width + x0) * 3, image_width * 3);
else
{
swap_fields (rgb_buffer, image_height, image_width * 3);
vo_draw_alpha_rgb24(w, h, src, srca, stride,
rgb_buffer + (y0 * image_width + x0) * 3, image_width * 3);
swap_fields (rgb_buffer, image_height, image_width * 3);
}
break;
}
}
static void draw_osd(void)
@ -231,39 +163,6 @@ static void draw_osd(void)
vo_draw_text(image_width, image_height, draw_alpha);
}
#ifdef CONFIG_LIBSWSCALE_INTERNALS
static void deinterleave_fields(uint8_t *ptr, const int stride,
const int img_height)
{
unsigned int i, j, k_start = 1, modv = img_height - 1;
unsigned char *line_state = malloc(modv);
for (i=0; i<modv; i++)
line_state[i] = 0;
line_state[0] = 1;
while(k_start < modv)
{
i = j = k_start;
fast_memcpy(rgb_line_buffer, ptr + stride * i, stride);
while (!line_state[j])
{
line_state[j] = 1;
i = j;
j = j * 2 % modv;
fast_memcpy(ptr + stride * i, ptr + stride * j, stride);
}
fast_memcpy(ptr + stride * i, rgb_line_buffer, stride);
while(k_start < modv && line_state[k_start])
k_start++;
}
free(line_state);
}
#endif
static void vo_y4m_write(const void *ptr, const size_t num_bytes)
{
if (fwrite(ptr, 1, num_bytes, yuv_out) != num_bytes)
@ -273,50 +172,8 @@ static void vo_y4m_write(const void *ptr, const size_t num_bytes)
static int write_last_frame(void)
{
uint8_t *upper_y, *upper_u, *upper_v, *rgb_buffer_lower;
int rgb_stride, uv_stride, field_height;
unsigned int i, low_ofs;
fprintf(yuv_out, "FRAME\n");
if (using_format != IMGFMT_YV12)
{
rgb_stride = image_width * 3;
uv_stride = image_width / 2;
if (Y4M_IS_INTERLACED)
{
field_height = image_height / 2;
upper_y = image;
upper_u = upper_y + image_width * field_height;
upper_v = upper_u + image_width * field_height / 4;
low_ofs = image_width * field_height * 3 / 2;
rgb_buffer_lower = rgb_buffer + rgb_stride * field_height;
/* Write Y plane */
for(i = 0; i < field_height; i++)
{
vo_y4m_write(upper_y + image_width * i, image_width);
vo_y4m_write(upper_y + image_width * i + low_ofs, image_width);
}
/* Write U and V plane */
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_u + uv_stride * i, uv_stride);
vo_y4m_write(upper_u + uv_stride * i + low_ofs, uv_stride);
}
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_v + uv_stride * i, uv_stride);
vo_y4m_write(upper_v + uv_stride * i + low_ofs, uv_stride);
}
return VO_TRUE; /* Image written; We have to stop here */
}
}
/* Write progressive frame */
vo_y4m_write(image, write_bytes);
return VO_TRUE;
}
@ -325,64 +182,6 @@ static void flip_page (void)
{
fprintf(yuv_out, "FRAME\n");
#ifdef CONFIG_LIBSWSCALE_INTERNALS
if (using_format != IMGFMT_YV12)
{
uint8_t *upper_y, *upper_u, *upper_v, *rgb_buffer_lower;
int rgb_stride, uv_stride, field_height;
unsigned int i, low_ofs;
rgb_stride = image_width * 3;
uv_stride = image_width / 2;
if (Y4M_IS_INTERLACED)
{
field_height = image_height / 2;
upper_y = image;
upper_u = upper_y + image_width * field_height;
upper_v = upper_u + image_width * field_height / 4;
low_ofs = image_width * field_height * 3 / 2;
rgb_buffer_lower = rgb_buffer + rgb_stride * field_height;
deinterleave_fields(rgb_buffer, rgb_stride, image_height);
rgb24toyv12(rgb_buffer, upper_y, upper_u, upper_v,
image_width, field_height,
image_width, uv_stride, rgb_stride);
rgb24toyv12(rgb_buffer_lower, upper_y + low_ofs,
upper_u + low_ofs, upper_v + low_ofs,
image_width, field_height,
image_width, uv_stride, rgb_stride);
/* Write Y plane */
for(i = 0; i < field_height; i++)
{
vo_y4m_write(upper_y + image_width * i, image_width);
vo_y4m_write(upper_y + image_width * i + low_ofs, image_width);
}
/* Write U and V plane */
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_u + uv_stride * i, uv_stride);
vo_y4m_write(upper_u + uv_stride * i + low_ofs, uv_stride);
}
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_v + uv_stride * i, uv_stride);
vo_y4m_write(upper_v + uv_stride * i + low_ofs, uv_stride);
}
return; /* Image written; We have to stop here */
}
rgb24toyv12(rgb_buffer, image_y, image_u, image_v,
image_width, image_height,
image_width, uv_stride, rgb_stride);
}
#endif
/* Write progressive frame */
vo_y4m_write(image, write_bytes);
}
@ -391,10 +190,6 @@ static int draw_slice(uint8_t *srcimg[], int stride[], int w,int h,int x,int y)
int i;
uint8_t *dst, *src = srcimg[0];
switch (using_format)
{
case IMGFMT_YV12:
// copy Y:
dst = image_y + image_width * y + x;
for (i = 0; i < h; i++)
@ -420,72 +215,19 @@ static int draw_slice(uint8_t *srcimg[], int stride[], int w,int h,int x,int y)
dstv += imgstride;
}
}
break;
case IMGFMT_BGR24:
case IMGFMT_RGB24:
dst = rgb_buffer + (image_width * y + x) * 3;
for (i = 0; i < h; i++)
{
fast_memcpy(dst, src, w * 3);
src += stride[0];
dst += image_width * 3;
}
break;
}
return 0;
}
static int draw_frame(uint8_t * src[])
{
switch(using_format)
{
case IMGFMT_YV12:
// gets done in draw_slice
break;
case IMGFMT_BGR24:
case IMGFMT_RGB24:
fast_memcpy(rgb_buffer, src[0], image_width * image_height * 3);
break;
}
return 0;
}
static int query_format(uint32_t format)
{
if (Y4M_IS_INTERLACED)
{
/* When processing interlaced material we want to get the raw RGB
* data and do the YV12 conversion ourselves to have the chrominance
* information sampled correct. */
switch(format)
{
case IMGFMT_YV12:
return VFCAP_CSP_SUPPORTED|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
#ifdef CONFIG_LIBSWSCALE_INTERNALS
case IMGFMT_BGR|24:
case IMGFMT_RGB|24:
return VFCAP_CSP_SUPPORTED|VFCAP_CSP_SUPPORTED_BY_HW|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
#endif
}
}
else
{
switch(format)
{
case IMGFMT_YV12:
return VFCAP_CSP_SUPPORTED|VFCAP_CSP_SUPPORTED_BY_HW|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
#ifdef CONFIG_LIBSWSCALE_INTERNALS
case IMGFMT_BGR|24:
case IMGFMT_RGB|24:
return VFCAP_CSP_SUPPORTED|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
#endif
}
}
if (format == IMGFMT_YV12)
return VFCAP_CSP_SUPPORTED|VFCAP_CSP_SUPPORTED_BY_HW|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
return 0;
}
@ -500,14 +242,6 @@ static void uninit(void)
fclose(yuv_out);
yuv_out = NULL;
if(rgb_buffer)
free(rgb_buffer);
rgb_buffer = NULL;
if(rgb_line_buffer)
free(rgb_line_buffer);
rgb_line_buffer = NULL;
if (yuv_filename)
free(yuv_filename);
yuv_filename = NULL;

View File

@ -3849,7 +3849,7 @@ if(mpctx->sh_video){
if(!mpctx->sh_video->fps && !force_fps){
mp_tmsg(MSGT_CPLAYER,MSGL_ERR,"FPS not specified in the header or invalid, use the -fps option.\n");
mpctx->sh_video=mpctx->d_video->sh=NULL;
mpctx->opts.correct_pts = 1;
}
}