video: display last frame, drain frames on video reconfig

Until now, the player didn't care to drain frames on video reconfig.
Instead, the VO was reconfigured (i.e. resized) before the queued frames
finished displaying. This can, for example, be observed by passing
multiple images with different sizes as mf:// filename. Then the window
would resize one frame before the image with the new size is displayed. With
--vo=vdpau, the effect is worse, because this VO queues more than 1
frame internally.

Fix this by explicitly draining buffered frames before video reconfig.

Raise the display time of the last frame. Otherwise, the last frame
would be shown for a very short time only. This usually doesn't matter,
but helps when playing image files. This is a byproduct of frame
draining, because normally, video timing is based on the frames queued
to the VO, and we can't do that with frames of different size or format.
So we pretend that the frame before the change is the last frame in
order to time it. This code is incorrect though: it tries to use the
framerate, which often doesn't make sense. But it's good enough to test
this code with mf://.
This commit is contained in:
wm4 2013-12-10 19:33:11 +01:00
parent 5135e93d0c
commit 227d087db6
5 changed files with 92 additions and 10 deletions

View File

@ -236,6 +236,11 @@ typedef struct MPContext {
// the goal of making flip() calls finish (rather than start) at the
// specified time.
double last_vo_flip_duration;
// Display duration (as "intended") of the last flipped frame.
double last_frame_duration;
// Set to true some time after a new frame has been shown, and it turns out
// that this frame was the last one before video ends.
bool playing_last_frame;
// How much video timing has been changed to make it match the audio
// timeline. Used for status line information only.
double total_avsync_change;
@ -283,8 +288,8 @@ typedef struct MPContext {
double start_timestamp;
// Timestamp from the last time some timing functions read the
// current time, in (occasionally wrapping) microseconds. Used
// to turn a new time value to a delta from last time.
// current time, in microseconds.
// Used to turn a new time value to a delta from last time.
int64_t last_time;
// Used to communicate the parameters of a seek between parts
@ -439,5 +444,6 @@ int reinit_video_filters(struct MPContext *mpctx);
double update_video(struct MPContext *mpctx, double endpts);
void mp_force_video_refresh(struct MPContext *mpctx);
void update_fps(struct MPContext *mpctx);
void video_execute_format_change(struct MPContext *mpctx);
#endif /* MPLAYER_MP_CORE_H */

View File

@ -184,6 +184,8 @@ static void seek_reset(struct MPContext *mpctx, bool reset_ao)
mpctx->video_pts = MP_NOPTS_VALUE;
mpctx->video_next_pts = MP_NOPTS_VALUE;
mpctx->playing_last_frame = false;
mpctx->last_frame_duration = 0;
mpctx->delay = 0;
mpctx->time_frame = 0;
mpctx->restart_playback = true;
@ -990,9 +992,31 @@ void run_playloop(struct MPContext *mpctx)
struct vo *vo = mpctx->video_out;
update_fps(mpctx);
video_left = vo->hasframe || vo->frame_loaded;
video_left = vo->hasframe || vo->frame_loaded || mpctx->playing_last_frame;
if (!vo->frame_loaded && (!mpctx->paused || mpctx->restart_playback)) {
double frame_time = update_video(mpctx, endpts);
if (frame_time < 0) {
if (!mpctx->playing_last_frame && mpctx->last_frame_duration > 0) {
mpctx->time_frame += mpctx->last_frame_duration;
mpctx->last_frame_duration = 0;
mpctx->playing_last_frame = true;
}
if (mpctx->playing_last_frame) {
frame_time = 0; // don't stop playback yet
} else if (mpctx->d_video->waiting_decoded_mpi) {
// Format changes behave like EOF, and this call "unstucks"
// the EOF condition (after waiting for the previous frame
// to finish displaying).
video_execute_format_change(mpctx);
frame_time = update_video(mpctx, endpts);
// We just displayed the previous frame, so display the
// new frame immediately.
if (frame_time > 0)
frame_time = 0;
}
}
mp_dbg(MSGT_AVSYNC, MSGL_DBG2, "*** ftime=%5.3f ***\n", frame_time);
if (mpctx->d_video->vfilter && mpctx->d_video->vfilter->initialized < 0)
{
@ -1027,7 +1051,7 @@ void run_playloop(struct MPContext *mpctx)
if (!video_left || (mpctx->paused && !mpctx->restart_playback))
break;
if (!vo->frame_loaded) {
if (!vo->frame_loaded && !mpctx->playing_last_frame) {
sleeptime = 0;
break;
}
@ -1071,6 +1095,11 @@ void run_playloop(struct MPContext *mpctx)
break;
}
sleeptime = 0;
mpctx->playing_last_frame = false;
// last frame case (don't set video_left - consider format changes)
if (!vo->frame_loaded)
break;
//=================== FLIP PAGE (VIDEO BLT): ======================
@ -1096,8 +1125,14 @@ void run_playloop(struct MPContext *mpctx)
int64_t pts_us = mpctx->last_time + time_frame * 1e6;
int duration = -1;
double pts2 = vo->next_pts2;
if (pts2 != MP_NOPTS_VALUE && opts->correct_pts &&
!mpctx->restart_playback) {
if (mpctx->video_pts != MP_NOPTS_VALUE && pts2 == MP_NOPTS_VALUE) {
// Make up a frame duration. Using the frame rate is not a good
// choice, since the frame rate could be unset/broken/random.
float fps = mpctx->d_video->fps;
double frame_time = fps > 0 ? 1.0 / fps : 0;
pts2 = mpctx->video_pts + frame_time;
}
if (pts2 != MP_NOPTS_VALUE) {
// expected A/V sync correction is ignored
double diff = (pts2 - mpctx->video_pts);
diff /= opts->playback_speed;
@ -1108,7 +1143,10 @@ void run_playloop(struct MPContext *mpctx)
if (diff > 10)
diff = 10;
duration = diff * 1e6;
mpctx->last_frame_duration = diff;
}
if (mpctx->restart_playback)
duration = -1;
vo_flip_page(vo, pts_us | 1, duration);
mpctx->last_vo_flip_duration = (mp_time_us() - t2) * 0.000001;

View File

@ -192,6 +192,8 @@ int reinit_video_chain(struct MPContext *mpctx)
mpctx->sync_audio_to_video = !sh->attached_picture;
mpctx->delay = 0;
mpctx->video_next_pts = MP_NOPTS_VALUE;
mpctx->playing_last_frame = false;
mpctx->last_frame_duration = 0;
mpctx->vo_pts_history_seek_ts++;
vo_seek_reset(mpctx->video_out);
@ -266,7 +268,8 @@ static void init_filter_params(struct MPContext *mpctx)
mp_property_do("deinterlace", M_PROPERTY_SET, &opts->deinterlace, mpctx);
}
static void filter_video(struct MPContext *mpctx, struct mp_image *frame)
static void filter_video(struct MPContext *mpctx, struct mp_image *frame,
bool reconfig_ok)
{
struct dec_video *d_video = mpctx->d_video;
@ -275,6 +278,13 @@ static void filter_video(struct MPContext *mpctx, struct mp_image *frame)
if (!mp_image_params_equals(&d_video->decoder_output, &params) ||
d_video->vfilter->initialized < 1)
{
// In case we want to wait until filter chain is drained
if (!reconfig_ok) {
talloc_free(d_video->waiting_decoded_mpi);
d_video->waiting_decoded_mpi = frame;
return;
}
reconfig_video(mpctx, &params, false);
if (d_video->vfilter->initialized > 0)
init_filter_params(mpctx);
@ -290,6 +300,20 @@ static void filter_video(struct MPContext *mpctx, struct mp_image *frame)
filter_output_queued_frame(mpctx);
}
// Reconfigure the video chain and the VO on a format change. This is separate,
// because we wait with the reconfig until the currently buffered video has
// finished displaying. Otherwise, we'd resize the window and then wait for the
// video finishing, which would result in a black window for that frame.
// Does nothing if there was no pending change.
void video_execute_format_change(struct MPContext *mpctx)
{
struct dec_video *d_video = mpctx->d_video;
// Take ownership of the frame that was set aside when the format change
// was detected (filter_video() stored it in waiting_decoded_mpi when
// called with reconfig_ok == false).
struct mp_image *decoded_frame = d_video->waiting_decoded_mpi;
d_video->waiting_decoded_mpi = NULL;
if (decoded_frame)
// reconfig_ok == true: the buffered video has finished displaying,
// so the filter chain/VO reconfig may happen now.
filter_video(mpctx, decoded_frame, true);
}
static int check_framedrop(struct MPContext *mpctx, double frame_time)
{
struct MPOpts *opts = mpctx->opts;
@ -326,7 +350,7 @@ static double update_video_attached_pic(struct MPContext *mpctx)
struct mp_image *decoded_frame =
video_decode(d_video, d_video->header->attached_picture, 0);
if (decoded_frame)
filter_video(mpctx, decoded_frame);
filter_video(mpctx, decoded_frame, true);
load_next_vo_frame(mpctx, true);
mpctx->video_next_pts = MP_NOPTS_VALUE;
return 0;
@ -340,7 +364,14 @@ double update_video(struct MPContext *mpctx, double endpts)
if (d_video->header->attached_picture)
return update_video_attached_pic(mpctx);
if (!load_next_vo_frame(mpctx, false)) {
if (load_next_vo_frame(mpctx, false)) {
// Use currently queued VO frame
} else if (d_video->waiting_decoded_mpi) {
// Draining on reconfig
if (!load_next_vo_frame(mpctx, true))
return -1;
} else {
// Decode a new frame
struct demux_packet *pkt = demux_read_packet(d_video->header);
if (pkt && pkt->pts != MP_NOPTS_VALUE)
pkt->pts += mpctx->video_offset;
@ -355,13 +386,15 @@ double update_video(struct MPContext *mpctx, double endpts)
video_decode(d_video, pkt, framedrop_type);
talloc_free(pkt);
if (decoded_frame) {
filter_video(mpctx, decoded_frame);
filter_video(mpctx, decoded_frame, false);
} else if (!pkt) {
if (!load_next_vo_frame(mpctx, true))
return -1;
}
}
// Whether the VO has an image queued.
// If it does, it will be used to time and display the next frame.
if (!video_out->frame_loaded)
return 0;

View File

@ -61,6 +61,7 @@ void video_reset_decoding(struct dec_video *d_video)
video_vd_control(d_video, VDCTRL_RESET, NULL);
if (d_video->vfilter && d_video->vfilter->initialized == 1)
vf_seek_reset(d_video->vfilter);
mp_image_unrefp(&d_video->waiting_decoded_mpi);
d_video->num_buffered_pts = 0;
d_video->last_pts = MP_NOPTS_VALUE;
d_video->last_packet_pdts = MP_NOPTS_VALUE;
@ -116,6 +117,7 @@ int video_get_colors(struct dec_video *d_video, const char *item, int *value)
void video_uninit(struct dec_video *d_video)
{
mp_image_unrefp(&d_video->waiting_decoded_mpi);
if (d_video->vd_driver) {
mp_tmsg(MSGT_DECVIDEO, MSGL_V, "Uninit video.\n");
d_video->vd_driver->uninit(d_video);

View File

@ -41,6 +41,9 @@ struct dec_video {
struct mp_image_params decoder_output; // last output of the decoder
struct mp_image_params vf_input; // video filter input params
// Used temporarily during format changes
struct mp_image *waiting_decoded_mpi;
void *priv; // for free use by vd_driver
// Last PTS from decoder (set with each vd_driver->decode() call)