From a748b6270934717063797aba646072b95f141acc Mon Sep 17 00:00:00 2001 From: wm4 Date: Tue, 8 Apr 2014 19:02:57 +0200 Subject: [PATCH] vo_vdpau: simplify previous vsync timestamp calculation The strange thing about this code was the shift parameter of the prev_vs2 function. The parameter is used to handle timestamps before the last vsync, since the % operator handles negative values incorrectly. Most callers set shift to 0, and _usually_ pass a timestamp after the last vsync. One caller sets it to 16, and can pass a timestamp before the last timestamp. The mystery is why prev_vs2 doesn't just compensate for the % operator semantics in the most simple way: if the result of the operator is negative, add the divisor to it. Instead, it adds a huge value to it (how huge is influenced by shift). If shift is 0, the result of the function will not be aligned to vsyncs. I have no idea why it was written in this way. Were there concerns about certain numeric overflows that could happen in the calculations? But I can't think of any (the difference between ts and vc->recent_vsync_time is usually not that huge). Or is there something more clever about it, which is important for the timing code? I can't think of anything either. So scrap it and simplify it. --- video/out/vo_vdpau.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/video/out/vo_vdpau.c b/video/out/vo_vdpau.c index 5223dd54b7..47e8bd95e2 100644 --- a/video/out/vo_vdpau.c +++ b/video/out/vo_vdpau.c @@ -994,12 +994,13 @@ static int update_presentation_queue_status(struct vo *vo) return num_queued; } -static inline uint64_t prev_vs2(struct vdpctx *vc, uint64_t ts, int shift) +// Return the timestamp of the vsync that must have happened before ts. 
+static inline uint64_t prev_vsync(struct vdpctx *vc, uint64_t ts) { - uint64_t offset = ts - vc->recent_vsync_time; - // Fix negative values for 1<<shift vsync intervals before vc->recent_vsync_time - offset += (uint64_t)vc->vsync_interval << shift; - offset %= vc->vsync_interval; + int64_t diff = (int64_t)(ts - vc->recent_vsync_time); + int64_t offset = diff % vc->vsync_interval; + if (offset < 0) + offset += vc->vsync_interval; return ts - offset; } @@ -1034,9 +1035,7 @@ static void flip_page_timed(struct vo *vo, int64_t pts_us, int duration) uint64_t ideal_pts = pts; uint64_t npts = duration >= 0 ? pts + duration : UINT64_MAX; -#define PREV_VS2(ts, shift) prev_vs2(vc, ts, shift) - // Only gives accurate results for ts >= vc->recent_vsync_time -#define PREV_VSYNC(ts) PREV_VS2(ts, 0) +#define PREV_VSYNC(ts) prev_vsync(vc, ts) /* We hope to be here at least one vsync before the frame should be shown. * If we are running late then don't drop the frame unless there is @@ -1065,7 +1064,7 @@ static void flip_page_timed(struct vo *vo, int64_t pts_us, int duration) */ uint64_t vsync = PREV_VSYNC(pts); if (pts < vsync + vsync_interval / 4 - && (vsync - PREV_VS2(vc->last_queue_time, 16) + && (vsync - PREV_VSYNC(vc->last_queue_time) > pts - vc->last_ideal_time + vsync_interval / 2 || vc->dropped_frame && vsync > vc->dropped_time)) pts -= vsync_interval / 2;