mirror of https://github.com/mpv-player/mpv
vo: change vsync base to nanoseconds
There is no reason to use microseconds precision. We have precise timers on all relevant platforms.
This commit is contained in:
parent
cdfd5c280a
commit
df764bc0c3
|
@ -835,7 +835,7 @@ static void handle_display_sync_frame(struct MPContext *mpctx,
|
|||
if (resample && using_spdif_passthrough(mpctx))
|
||||
return;
|
||||
|
||||
double vsync = vo_get_vsync_interval(vo) / 1e6;
|
||||
double vsync = vo_get_vsync_interval(vo) / 1e9;
|
||||
if (vsync <= 0)
|
||||
return;
|
||||
|
||||
|
|
|
@ -237,22 +237,22 @@ static bool d3d11_submit_frame(struct ra_swapchain *sw,
|
|||
return true;
|
||||
}
|
||||
|
||||
static int64_t qpc_to_us(struct ra_swapchain *sw, int64_t qpc)
|
||||
static int64_t qpc_to_ns(struct ra_swapchain *sw, int64_t qpc)
|
||||
{
|
||||
struct priv *p = sw->priv;
|
||||
|
||||
// Convert QPC units (1/perf_freq seconds) to microseconds. This will work
|
||||
// Convert QPC units (1/perf_freq seconds) to nanoseconds. This will work
|
||||
// without overflow because the QPC value is guaranteed not to roll-over
|
||||
// within 100 years, so perf_freq must be less than 2.9*10^9.
|
||||
return qpc / p->perf_freq * 1000000 +
|
||||
qpc % p->perf_freq * 1000000 / p->perf_freq;
|
||||
return qpc / p->perf_freq * INT64_C(1000000000) +
|
||||
qpc % p->perf_freq * INT64_C(1000000000) / p->perf_freq;
|
||||
}
|
||||
|
||||
static int64_t qpc_us_now(struct ra_swapchain *sw)
|
||||
static int64_t qpc_ns_now(struct ra_swapchain *sw)
|
||||
{
|
||||
LARGE_INTEGER perf_count;
|
||||
QueryPerformanceCounter(&perf_count);
|
||||
return qpc_to_us(sw, perf_count.QuadPart);
|
||||
return qpc_to_ns(sw, perf_count.QuadPart);
|
||||
}
|
||||
|
||||
static void d3d11_swap_buffers(struct ra_swapchain *sw)
|
||||
|
@ -330,7 +330,7 @@ static void d3d11_get_vsync(struct ra_swapchain *sw, struct vo_vsync_info *info)
|
|||
if (src_passed && sqt_passed)
|
||||
p->vsync_duration_qpc = sqt_passed / src_passed;
|
||||
if (p->vsync_duration_qpc)
|
||||
info->vsync_duration = qpc_to_us(sw, p->vsync_duration_qpc);
|
||||
info->vsync_duration = qpc_to_ns(sw, p->vsync_duration_qpc);
|
||||
|
||||
// If the physical frame rate is known and the other members of
|
||||
// DXGI_FRAME_STATISTICS are non-0, estimate the timing of the next frame
|
||||
|
@ -353,8 +353,8 @@ static void d3d11_get_vsync(struct ra_swapchain *sw, struct vo_vsync_info *info)
|
|||
// Only set the estimated display time if it's after the last submission
|
||||
// time. It could be before if mpv skips a lot of frames.
|
||||
if (last_queue_display_time_qpc >= p->last_submit_qpc) {
|
||||
info->last_queue_display_time = mp_time_us() +
|
||||
(qpc_to_us(sw, last_queue_display_time_qpc) - qpc_us_now(sw));
|
||||
info->last_queue_display_time = mp_time_ns() +
|
||||
(qpc_to_ns(sw, last_queue_display_time_qpc) - qpc_ns_now(sw));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -943,14 +943,14 @@ static void drm_pflip_cb(int fd, unsigned int msc, unsigned int sec,
|
|||
struct timespec ts;
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &ts))
|
||||
goto fail;
|
||||
const uint64_t now_monotonic = ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
|
||||
const uint64_t ust_mp_time = mp_time_us() - (now_monotonic - vsync->ust);
|
||||
int64_t now_monotonic = ts.tv_sec * UINT64_C(1000000000) + ts.tv_nsec;
|
||||
int64_t ust_mp_time = mp_time_ns() - (now_monotonic - vsync->ust * 1000);
|
||||
|
||||
const uint64_t ust_since_enqueue = vsync->ust - frame_vsync->ust;
|
||||
const unsigned int msc_since_enqueue = vsync->msc - frame_vsync->msc;
|
||||
const unsigned int sbc_since_enqueue = vsync->sbc - frame_vsync->sbc;
|
||||
|
||||
vsync_info->vsync_duration = ust_since_enqueue / msc_since_enqueue;
|
||||
vsync_info->vsync_duration = ust_since_enqueue * 1000 / msc_since_enqueue;
|
||||
vsync_info->skipped_vsyncs = msc_since_last_flip - 1; // Valid iff swap_buffers is called every vsync
|
||||
vsync_info->last_queue_display_time = ust_mp_time + (sbc_since_enqueue * vsync_info->vsync_duration);
|
||||
}
|
||||
|
|
|
@ -70,8 +70,8 @@ void present_sync_swap(struct mp_present *present)
|
|||
if (clock_gettime(CLOCK_MONOTONIC, &ts))
|
||||
return;
|
||||
|
||||
int64_t now_monotonic = ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
|
||||
int64_t ust_mp_time = mp_time_us() - (now_monotonic - ust);
|
||||
int64_t now_monotonic = ts.tv_sec * UINT64_C(1000000000) + ts.tv_nsec;
|
||||
int64_t ust_mp_time = mp_time_ns() - (now_monotonic - ust);
|
||||
|
||||
present->last_queue_display_time = ust_mp_time;
|
||||
}
|
||||
|
|
|
@ -151,7 +151,7 @@ struct vo_internal {
|
|||
int64_t *vsync_samples;
|
||||
int num_vsync_samples;
|
||||
int64_t num_total_vsync_samples;
|
||||
double prev_vsync;
|
||||
int64_t prev_vsync;
|
||||
double base_vsync;
|
||||
int drop_point;
|
||||
double estimated_vsync_interval;
|
||||
|
@ -437,8 +437,8 @@ static void check_estimated_display_fps(struct vo *vo)
|
|||
|
||||
bool use_estimated = false;
|
||||
if (in->num_total_vsync_samples >= MAX_VSYNC_SAMPLES / 2 &&
|
||||
in->estimated_vsync_interval <= 1e6 / 20.0 &&
|
||||
in->estimated_vsync_interval >= 1e6 / 99.0)
|
||||
in->estimated_vsync_interval <= 1e9 / 20.0 &&
|
||||
in->estimated_vsync_interval >= 1e9 / 99.0)
|
||||
{
|
||||
for (int n = 0; n < in->num_vsync_samples; n++) {
|
||||
if (fabs(in->vsync_samples[n] - in->estimated_vsync_interval)
|
||||
|
@ -451,13 +451,13 @@ static void check_estimated_display_fps(struct vo *vo)
|
|||
use_estimated = true;
|
||||
done: ;
|
||||
}
|
||||
if (use_estimated == (in->vsync_interval == in->nominal_vsync_interval)) {
|
||||
if (use_estimated == (fabs(in->vsync_interval - in->nominal_vsync_interval) < 1e9)) {
|
||||
if (use_estimated) {
|
||||
MP_VERBOSE(vo, "adjusting display FPS to a value closer to %.3f Hz\n",
|
||||
1e6 / in->estimated_vsync_interval);
|
||||
1e9 / in->estimated_vsync_interval);
|
||||
} else {
|
||||
MP_VERBOSE(vo, "switching back to assuming display fps = %.3f Hz\n",
|
||||
1e6 / in->nominal_vsync_interval);
|
||||
1e9 / in->nominal_vsync_interval);
|
||||
}
|
||||
}
|
||||
in->vsync_interval = use_estimated ? in->estimated_vsync_interval
|
||||
|
@ -537,7 +537,7 @@ static void update_vsync_timing_after_swap(struct vo *vo,
|
|||
vsync_skip_detection(vo);
|
||||
|
||||
MP_STATS(vo, "value %f jitter", in->estimated_vsync_jitter);
|
||||
MP_STATS(vo, "value %f vsync-diff", in->vsync_samples[0] / 1e6);
|
||||
MP_STATS(vo, "value %f vsync-diff", in->vsync_samples[0] / 1e9);
|
||||
}
|
||||
|
||||
// to be called from VO thread only
|
||||
|
@ -563,7 +563,7 @@ static void update_display_fps(struct vo *vo)
|
|||
display_fps = in->reported_display_fps;
|
||||
|
||||
if (in->display_fps != display_fps) {
|
||||
in->nominal_vsync_interval = display_fps > 0 ? 1e6 / display_fps : 0;
|
||||
in->nominal_vsync_interval = display_fps > 0 ? 1e9 / display_fps : 0;
|
||||
in->vsync_interval = MPMAX(in->nominal_vsync_interval, 1);
|
||||
in->display_fps = display_fps;
|
||||
|
||||
|
@ -926,7 +926,7 @@ static bool render_frame(struct vo *vo)
|
|||
in->dropped_frame &= frame->can_drop;
|
||||
// Even if we're hopelessly behind, rather degrade to 10 FPS playback,
|
||||
// instead of just freezing the display forever.
|
||||
in->dropped_frame &= now - in->prev_vsync < 100 * 1000;
|
||||
in->dropped_frame &= now - (in->prev_vsync / 1000.0) < 100 * 1000;
|
||||
in->dropped_frame &= in->hasframe_rendered;
|
||||
|
||||
// Setup parameters for the next time this frame is drawn. ("frame" is the
|
||||
|
@ -944,7 +944,7 @@ static bool render_frame(struct vo *vo)
|
|||
|
||||
bool use_vsync = in->current_frame->display_synced && !in->paused;
|
||||
if (use_vsync && !in->expecting_vsync) // first DS frame in a row
|
||||
in->prev_vsync = now;
|
||||
in->prev_vsync = now * 1000;
|
||||
in->expecting_vsync = use_vsync;
|
||||
|
||||
// Store the initial value before we unlock.
|
||||
|
@ -991,7 +991,7 @@ static bool render_frame(struct vo *vo)
|
|||
|
||||
// Make up some crap if presentation feedback is missing.
|
||||
if (vsync.last_queue_display_time < 0)
|
||||
vsync.last_queue_display_time = mp_time_us();
|
||||
vsync.last_queue_display_time = mp_time_ns();
|
||||
|
||||
stats_time_end(in->stats, "video-flip");
|
||||
|
||||
|
@ -1303,7 +1303,7 @@ double vo_get_estimated_vsync_interval(struct vo *vo)
|
|||
{
|
||||
struct vo_internal *in = vo->in;
|
||||
pthread_mutex_lock(&in->lock);
|
||||
double res = in->estimated_vsync_interval / 1e6;
|
||||
double res = in->estimated_vsync_interval / 1e9;
|
||||
pthread_mutex_unlock(&in->lock);
|
||||
return res;
|
||||
}
|
||||
|
@ -1337,7 +1337,7 @@ double vo_get_delay(struct vo *vo)
|
|||
res = 0;
|
||||
}
|
||||
pthread_mutex_unlock(&in->lock);
|
||||
return res ? (res - mp_time_us()) / 1e6 : 0;
|
||||
return res ? (res - mp_time_ns()) / 1e9 : 0;
|
||||
}
|
||||
|
||||
void vo_discard_timing_info(struct vo *vo)
|
||||
|
|
|
@ -268,13 +268,13 @@ struct vo_frame {
|
|||
// Presentation feedback. See get_vsync() for how backends should fill this
|
||||
// struct.
|
||||
struct vo_vsync_info {
|
||||
// mp_time_us() timestamp at which the last queued frame will likely be
|
||||
// mp_time_ns() timestamp at which the last queued frame will likely be
|
||||
// displayed (this is in the future, unless the frame is instantly output).
|
||||
// -1 if unset or unsupported.
|
||||
// This implies the latency of the output.
|
||||
int64_t last_queue_display_time;
|
||||
|
||||
// Time between 2 vsync events in microseconds. The difference should be the
|
||||
// Time between 2 vsync events in nanoseconds. The difference should be the
|
||||
// from 2 times sampled from the same reference point (it should not be the
|
||||
// difference between e.g. the end of scanout and the start of the next one;
|
||||
// it must be continuous).
|
||||
|
|
|
@ -766,7 +766,7 @@ static void flip_page(struct vo *vo)
|
|||
if (vc->user_fps > 0) {
|
||||
vc->vsync_interval = 1e9 / vc->user_fps;
|
||||
} else if (vc->user_fps == 0) {
|
||||
vc->vsync_interval = vo_get_vsync_interval(vo) * 1000;
|
||||
vc->vsync_interval = vo_get_vsync_interval(vo);
|
||||
}
|
||||
vc->vsync_interval = MPMAX(vc->vsync_interval, 1);
|
||||
|
||||
|
@ -782,7 +782,7 @@ static void flip_page(struct vo *vo)
|
|||
vdp_st = vdp->presentation_queue_get_time(vc->flip_queue, &vdp_time);
|
||||
CHECK_VDP_WARNING(vo, "Error when calling vdp_presentation_queue_get_time");
|
||||
|
||||
int64_t rel_pts_ns = (pts_us - mp_time_us()) * 1000;
|
||||
int64_t rel_pts_ns = (pts_us * 1000) - mp_time_ns();
|
||||
if (!pts_us || rel_pts_ns < 0)
|
||||
rel_pts_ns = 0;
|
||||
|
||||
|
|
|
@ -1144,7 +1144,7 @@ static void feedback_presented(void *data, struct wp_presentation_feedback *fbac
|
|||
if (fback)
|
||||
remove_feedback(fback_pool, fback);
|
||||
|
||||
wl->refresh_interval = (int64_t)refresh_nsec / 1000;
|
||||
wl->refresh_interval = (int64_t)refresh_nsec;
|
||||
|
||||
// Very similar to oml_sync_control, in this case we assume that every
|
||||
// time the compositor receives feedback, a buffer swap has been already
|
||||
|
@ -1156,7 +1156,7 @@ static void feedback_presented(void *data, struct wp_presentation_feedback *fbac
|
|||
// - these values are updated every time the compositor receives feedback.
|
||||
|
||||
int64_t sec = (uint64_t) tv_sec_lo + ((uint64_t) tv_sec_hi << 32);
|
||||
int64_t ust = sec * 1000000LL + (uint64_t) tv_nsec / 1000;
|
||||
int64_t ust = sec * UINT64_C(1000000000) + (uint64_t) tv_nsec;
|
||||
int64_t msc = (uint64_t) seq_lo + ((uint64_t) seq_hi << 32);
|
||||
present_update_sync_values(wl->present, ust, msc);
|
||||
}
|
||||
|
@ -2566,18 +2566,18 @@ void vo_wayland_wait_frame(struct vo_wayland_state *wl)
|
|||
vblank_time = wl->refresh_interval;
|
||||
|
||||
if (vblank_time <= 0 && wl->current_output->refresh_rate > 0)
|
||||
vblank_time = 1e6 / wl->current_output->refresh_rate;
|
||||
vblank_time = 1e9 / wl->current_output->refresh_rate;
|
||||
|
||||
// Ideally you should never reach this point.
|
||||
if (vblank_time <= 0)
|
||||
vblank_time = 1e6 / 60;
|
||||
vblank_time = 1e9 / 60;
|
||||
|
||||
// Completely arbitrary amount of additional time to wait.
|
||||
vblank_time += 0.05 * vblank_time;
|
||||
int64_t finish_time = mp_time_us() + vblank_time;
|
||||
int64_t finish_time = mp_time_ns() + vblank_time;
|
||||
|
||||
while (wl->frame_wait && finish_time > mp_time_us()) {
|
||||
int poll_time = ceil((double)(finish_time - mp_time_us()) / 1000);
|
||||
while (wl->frame_wait && finish_time > mp_time_ns()) {
|
||||
int poll_time = ceil((finish_time - mp_time_ns()) / 1e6);
|
||||
if (poll_time < 0) {
|
||||
poll_time = 0;
|
||||
}
|
||||
|
|
|
@ -1327,7 +1327,8 @@ void vo_x11_check_events(struct vo *vo)
|
|||
if (cookie->evtype == PresentCompleteNotify) {
|
||||
XPresentCompleteNotifyEvent *present_event;
|
||||
present_event = (XPresentCompleteNotifyEvent *)cookie->data;
|
||||
present_update_sync_values(x11->present, present_event->ust,
|
||||
present_update_sync_values(x11->present,
|
||||
present_event->ust * 1000,
|
||||
present_event->msc);
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue