mirror of https://github.com/mpv-player/mpv
audio: change playback restart and resyncing
This commit makes audio decoding non-blocking. If e.g. the network is
too slow the playloop will just go to sleep, instead of blocking until
enough data is available.
For video, this was already done with commit 7083f88c. For audio, it's
unfortunately much more complicated, because the audio decoder was used
in a blocking manner. Large changes are required to get around this.
The whole playback restart mechanism must be turned into a state machine,
especially since it has close interactions with video restart. Lots of
video code is thus also changed.
(For the record, I don't think switching this code to threads would
make this conceptually easier: the code would still have to deal with
external input while blocked, so these in-between states do get visible
[and thus need to be handled] anyway. On the other hand, it certainly
should be possible to modularize this code a bit better.)
This will probably cause a bunch of regressions.
parent 58255e0e2b
commit 261506e36e
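As a standalone illustration of the decode contract the message describes (a decoder returns a "wait" status instead of blocking until the demuxer has data, and the playloop goes to sleep), here is a minimal sketch. This is not mpv code: every function and type below is invented for the example; only the idea of an AD_WAIT-style return code comes from the diff.

// Minimal sketch of the non-blocking decode contract (assumption-laden toy code).
#include <stdbool.h>
#include <stdio.h>

enum { AD_OK = 0, AD_ERR = -1, AD_EOF = -2, AD_WAIT = -4 }; // mirrors dec_audio.h below

// Pretend packet source: returns false while "the network" has not delivered data yet.
static bool fake_read_packet_async(int *pkt, int tick)
{
    if (tick < 3)
        return false;   // no data yet - do not block
    *pkt = tick;
    return true;
}

static int fake_decode_packet(int tick)
{
    int pkt;
    if (!fake_read_packet_async(&pkt, tick))
        return AD_WAIT; // tell the playloop to go to sleep instead of blocking
    if (pkt > 5)
        return AD_EOF;
    printf("decoded packet %d\n", pkt);
    return AD_OK;
}

int main(void)
{
    // The playloop: on AD_WAIT it would normally sleep until the demuxer wakes
    // it up; here a tick counter stands in for that wakeup.
    for (int tick = 0; ; tick++) {
        int r = fake_decode_packet(tick);
        if (r == AD_WAIT) {
            printf("tick %d: no data, playloop sleeps\n", tick);
            continue;
        }
        if (r == AD_EOF)
            break;
    }
    return 0;
}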
@@ -309,8 +309,10 @@ static int decode_packet(struct dec_audio *da)
     mp_audio_set_null_data(&da->decoded);
 
     struct demux_packet *mpkt = priv->packet;
-    if (!mpkt)
-        mpkt = demux_read_packet(da->header);
+    if (!mpkt) {
+        if (demux_read_packet_async(da->header, &mpkt) == 0)
+            return AD_WAIT;
+    }
 
     priv->packet = talloc_steal(priv, mpkt);
 
@@ -343,7 +345,7 @@ static int decode_packet(struct dec_audio *da)
         }
         // LATM may need many packets to find mux info
         if (ret == AVERROR(EAGAIN))
-            return 0;
+            return AD_OK;
     }
     if (ret < 0) {
         MP_ERR(da, "Error decoding audio.\n");
@@ -218,7 +218,9 @@ static int decode_packet(struct dec_audio *da)
 
     mp_audio_set_null_data(&da->decoded);
 
-    struct demux_packet *pkt = demux_read_packet(da->header);
+    struct demux_packet *pkt;
+    if (demux_read_packet_async(da->header, &pkt) == 0)
+        return AD_WAIT;
     if (!pkt)
         return AD_EOF;
 
@@ -191,7 +191,10 @@ static int decode_packet(struct dec_audio *da)
 
     spdif_ctx->out_buffer_len = 0;
 
-    struct demux_packet *mpkt = demux_read_packet(da->header);
+    struct demux_packet *mpkt;
+    if (demux_read_packet_async(da->header, &mpkt) == 0)
+        return AD_WAIT;
+
     if (!mpkt)
         return AD_EOF;
 
@@ -89,27 +89,7 @@ static int init_audio_codec(struct dec_audio *d_audio, const char *decoder)
         return 0;
     }
 
-    // Decode enough until we know the audio format.
-    for (int tries = 1; ; tries++) {
-        if (mp_audio_config_valid(&d_audio->decoded)) {
-            MP_VERBOSE(d_audio, "Initial decode succeeded after %d packets.\n",
-                       tries);
-            break;
-        }
-        if (tries >= 50) {
-            MP_ERR(d_audio, "initial decode failed\n");
-            uninit_decoder(d_audio);
-            return 0;
-        }
-        d_audio->ad_driver->decode_packet(d_audio);
-    }
-
     d_audio->decode_buffer = mp_audio_buffer_create(NULL);
-    if (!reinit_audio_buffer(d_audio)) {
-        uninit_decoder(d_audio);
-        return 0;
-    }
-
     return 1;
 }
 
@@ -171,9 +151,6 @@ int audio_init_best_codec(struct dec_audio *d_audio, char *audio_decoders)
             talloc_asprintf(d_audio, "%s [%s:%s]", decoder->desc, decoder->family,
                             decoder->decoder);
         MP_VERBOSE(d_audio, "Selected audio codec: %s\n", d_audio->decoder_desc);
-        MP_VERBOSE(d_audio, "AUDIO: %d Hz, %d ch, %s\n",
-                   d_audio->decoded.rate, d_audio->decoded.channels.num,
-                   af_fmt_to_str(d_audio->decoded.format));
     } else {
         MP_ERR(d_audio, "Failed to initialize an audio decoder for codec '%s'.\n",
                d_audio->header->codec ? d_audio->header->codec : "<unknown>");
@@ -238,6 +215,24 @@ int audio_init_filters(struct dec_audio *d_audio, int in_samplerate,
     return 1;
 }
 
+/* Decode packets until we know the audio format. Then reinit the buffer.
+ * Returns AD_OK on success, negative AD_* code otherwise.
+ * Also returns AD_OK if already initialized (and does nothing).
+ */
+int initial_audio_decode(struct dec_audio *da)
+{
+    while (!mp_audio_config_valid(&da->decoded)) {
+        if (da->decoded.samples > 0)
+            return AD_ERR; // invalid format, rather than uninitialized
+        int ret = da->ad_driver->decode_packet(da);
+        if (ret < 0)
+            return ret;
+    }
+    if (mp_audio_buffer_samples(da->decode_buffer) > 0) // avoid accidental flush
+        return AD_OK;
+    return reinit_audio_buffer(da) ? AD_OK : AD_ERR;
+}
+
 // Filter len bytes of input, put result into outbuf.
 static int filter_n_bytes(struct dec_audio *da, struct mp_audio_buffer *outbuf,
                           int len)
@@ -270,6 +265,9 @@ static int filter_n_bytes(struct dec_audio *da, struct mp_audio_buffer *outbuf,
             break;
     }
 
+    if (error == AD_WAIT)
+        return error;
+
     // Filter
     struct mp_audio filter_data;
     mp_audio_buffer_peek(da->decode_buffer, &filter_data);
@@ -306,6 +304,9 @@ static int filter_n_bytes(struct dec_audio *da, struct mp_audio_buffer *outbuf,
 int audio_decode(struct dec_audio *d_audio, struct mp_audio_buffer *outbuf,
                  int minsamples)
 {
+    if (!d_audio->afilter)
+        return AD_ERR;
+
     // Indicates that a filter seems to be buffering large amounts of data
     int huge_filter_buffer = 0;
 
@@ -51,15 +51,16 @@ struct dec_audio {
 enum {
     AD_OK = 0,
     AD_ERR = -1,
-    AD_NEW_FMT = -2,
-    AD_ASYNC_PLAY_DONE = -3,
-    AD_EOF = -4,
+    AD_EOF = -2,
+    AD_NEW_FMT = -3,
+    AD_WAIT = -4,
 };
 
 struct mp_decoder_list *audio_decoder_list(void);
 int audio_init_best_codec(struct dec_audio *d_audio, char *audio_decoders);
 int audio_decode(struct dec_audio *d_audio, struct mp_audio_buffer *outbuf,
                  int minsamples);
+int initial_audio_decode(struct dec_audio *d_audio);
 void audio_reset_decoding(struct dec_audio *d_audio);
 void audio_uninit(struct dec_audio *d_audio);
 
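For reference, a hedged sketch of one plausible way a caller could react to the renumbered AD_* codes above. This is not an mpv call site (the real handling lives in the player code further down); the enum values are copied from the hunk, everything else is invented for illustration.

// Toy caller for the AD_* status codes (assumptions: names other than AD_* are made up).
#include <stdio.h>

enum { AD_OK = 0, AD_ERR = -1, AD_EOF = -2, AD_NEW_FMT = -3, AD_WAIT = -4 };

static const char *describe_decode_status(int status)
{
    switch (status) {
    case AD_OK:      return "got data, keep filling buffers";
    case AD_WAIT:    return "no input yet: return to the playloop and retry later";
    case AD_EOF:     return "no more input: start draining the AO";
    case AD_NEW_FMT: return "format change: reinit filters/AO, then continue";
    default:         return "error: give up on this stream";
    }
}

int main(void)
{
    int codes[] = {AD_OK, AD_WAIT, AD_EOF, AD_NEW_FMT, AD_ERR};
    for (int i = 0; i < 5; i++)
        printf("%d: %s\n", codes[i], describe_decode_status(codes[i]));
    return 0;
}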
player/audio.c (286 changed lines)
@@ -19,6 +19,7 @@
 #include <stddef.h>
 #include <stdbool.h>
 #include <inttypes.h>
+#include <limits.h>
 #include <math.h>
 #include <assert.h>
 
@@ -103,6 +104,8 @@ void reinit_audio_chain(struct MPContext *mpctx)
 
     mp_notify(mpctx, MPV_EVENT_AUDIO_RECONFIG, NULL);
 
+    mpctx->audio_status = STATUS_SYNCING;
+
     if (!(mpctx->initialized_flags & INITIALIZED_ACODEC)) {
         mpctx->initialized_flags |= INITIALIZED_ACODEC;
         assert(!mpctx->d_audio);
@@ -117,9 +120,19 @@ void reinit_audio_chain(struct MPContext *mpctx)
     }
     assert(mpctx->d_audio);
 
+    if (!mpctx->ao_buffer)
+        mpctx->ao_buffer = mp_audio_buffer_create(mpctx);
+
     struct mp_audio in_format;
     mp_audio_buffer_get_format(mpctx->d_audio->decode_buffer, &in_format);
 
+    if (!mp_audio_config_valid(&in_format)) {
+        // We don't know the audio format yet - so configure it later as we're
+        // resyncing. fill_audio_buffers() will call this function again.
+        mpctx->sleeptime = 0;
+        return;
+    }
+
     if (mpctx->ao_decoder_fmt && (mpctx->initialized_flags & INITIALIZED_AO) &&
         !mp_audio_config_equals(mpctx->ao_decoder_fmt, &in_format) &&
         opts->gapless_audio < 0)
@@ -169,7 +182,6 @@ void reinit_audio_chain(struct MPContext *mpctx)
         struct mp_audio fmt;
         ao_get_format(ao, &fmt);
 
-        mpctx->ao_buffer = mp_audio_buffer_create(ao);
         mp_audio_buffer_reinit(mpctx->ao_buffer, &fmt);
 
         mpctx->ao_decoder_fmt = talloc(NULL, struct mp_audio);
@@ -185,7 +197,6 @@ void reinit_audio_chain(struct MPContext *mpctx)
     if (recreate_audio_filters(mpctx) < 0)
         goto init_error;
 
-    mpctx->syncing_audio = true;
     return;
 
 init_error:
@@ -206,6 +217,9 @@ double written_audio_pts(struct MPContext *mpctx)
     struct mp_audio in_format;
     mp_audio_buffer_get_format(d_audio->decode_buffer, &in_format);
 
+    if (!mp_audio_config_valid(&in_format) || !d_audio->afilter)
+        return MP_NOPTS_VALUE;;
+
     // first calculate the end pts of audio that has been output by decoder
     double a_pts = d_audio->pts;
     if (a_pts == MP_NOPTS_VALUE)
@@ -240,7 +254,7 @@ double written_audio_pts(struct MPContext *mpctx)
 double playing_audio_pts(struct MPContext *mpctx)
 {
     double pts = written_audio_pts(mpctx);
-    if (pts == MP_NOPTS_VALUE)
+    if (pts == MP_NOPTS_VALUE || !mpctx->ao)
         return pts;
     return pts - mpctx->opts->playback_speed * ao_get_delay(mpctx->ao);
 }
@@ -273,142 +287,109 @@ static int write_to_ao(struct MPContext *mpctx, struct mp_audio *data, int flags
     return 0;
 }
 
-static int write_silence_to_ao(struct MPContext *mpctx, int samples, int flags,
-                               double pts)
+// Return the number of samples that must be skipped or prepended to reach the
+// target audio pts after a seek (for A/V sync or hr-seek).
+// Return value (*skip):
+//   >0: skip this many samples
+//   =0: don't do anything
+//   <0: prepend this many samples of silence
+// Returns false if PTS is not known yet.
+static bool get_sync_samples(struct MPContext *mpctx, int *skip)
 {
-    struct mp_audio tmp = {0};
-    mp_audio_buffer_get_format(mpctx->ao_buffer, &tmp);
-    tmp.samples = samples;
-    char *p = talloc_size(NULL, tmp.samples * tmp.sstride);
-    for (int n = 0; n < tmp.num_planes; n++)
-        tmp.planes[n] = p;
-    mp_audio_fill_silence(&tmp, 0, tmp.samples);
-    int r = write_to_ao(mpctx, &tmp, 0, pts);
-    talloc_free(p);
-    return r;
-}
-
-static int audio_start_sync(struct MPContext *mpctx, int playsize)
-{
-    struct ao *ao = mpctx->ao;
     struct MPOpts *opts = mpctx->opts;
-    struct dec_audio *d_audio = mpctx->d_audio;
-    int res;
+    *skip = 0;
 
-    assert(d_audio);
+    if (mpctx->audio_status != STATUS_SYNCING)
+        return true;
 
-    struct mp_audio out_format;
-    ao_get_format(ao, &out_format);
+    struct mp_audio out_format = {0};
+    ao_get_format(mpctx->ao, &out_format);
+    double play_samplerate = out_format.rate / opts->playback_speed;
 
-    // Timing info may not be set without
-    res = audio_decode(d_audio, mpctx->ao_buffer, 1);
-    if (res < 0)
-        return res;
-
-    int samples;
-    bool did_retry = false;
-    double written_pts;
-    double real_samplerate = out_format.rate / opts->playback_speed;
-    bool hrseek = mpctx->hrseek_active;   // audio only hrseek
-    mpctx->hrseek_active = false;
-    while (1) {
-        written_pts = written_audio_pts(mpctx);
-        double ptsdiff;
-        if (hrseek)
-            ptsdiff = written_pts - mpctx->hrseek_pts;
-        else
-            ptsdiff = written_pts - mpctx->video_next_pts - mpctx->delay
-                      + mpctx->audio_delay;
-        samples = ptsdiff * real_samplerate;
-
-        // ogg demuxers give packets without timing
-        if (written_pts <= 1 && d_audio->pts == MP_NOPTS_VALUE) {
-            if (!did_retry) {
-                // Try to read more data to see packets that have pts
-                res = audio_decode(d_audio, mpctx->ao_buffer, out_format.rate);
-                if (res < 0)
-                    return res;
-                did_retry = true;
-                continue;
-            }
-            samples = 0;
-        }
-
-        if (fabs(ptsdiff) > 300 || isnan(ptsdiff))   // pts reset or just broken?
-            samples = 0;
-
-        if (samples > 0)
-            break;
-
-        mpctx->syncing_audio = false;
-        int skip_samples = -samples;
-        int a = MPMIN(skip_samples, MPMAX(playsize, 2500));
-        res = audio_decode(d_audio, mpctx->ao_buffer, a);
-        if (skip_samples <= mp_audio_buffer_samples(mpctx->ao_buffer)) {
-            mp_audio_buffer_skip(mpctx->ao_buffer, skip_samples);
-            if (res < 0)
-                return res;
-            return audio_decode(d_audio, mpctx->ao_buffer, playsize);
-        }
-        mp_audio_buffer_clear(mpctx->ao_buffer);
-        if (res < 0)
-            return res;
-    }
-    if (hrseek)
-        // Don't add silence in audio-only case even if position is too late
-        return 0;
-    if (samples >= playsize) {
-        /* This case could fall back to the one below with
-         * samples = playsize, but then silence would keep accumulating
-         * in ao_buffer if the AO accepts less data than it asks for
-         * in playsize. */
-        write_silence_to_ao(mpctx, playsize, 0,
-                            written_pts - samples / real_samplerate);
-        return AD_ASYNC_PLAY_DONE;
-    }
-    mpctx->syncing_audio = false;
-    mp_audio_buffer_prepend_silence(mpctx->ao_buffer, samples);
-    return audio_decode(d_audio, mpctx->ao_buffer, playsize);
+    bool is_pcm = !(out_format.format & AF_FORMAT_SPECIAL_MASK); // no spdif
+    if (!opts->initial_audio_sync || !is_pcm) {
+        mpctx->audio_status = STATUS_FILLING;
+        return true;
+    }
+
+    double written_pts = written_audio_pts(mpctx);
+    if (written_pts == MP_NOPTS_VALUE && !mp_audio_buffer_samples(mpctx->ao_buffer))
+        return false; // no audio read yet
+
+    bool sync_to_video = mpctx->d_video && mpctx->sync_audio_to_video;
+
+    double sync_pts = MP_NOPTS_VALUE;
+    if (sync_to_video) {
+        if (mpctx->video_next_pts != MP_NOPTS_VALUE) {
+            sync_pts = mpctx->video_next_pts;
+        } else if (mpctx->video_status < STATUS_READY) {
+            return false; // wait until we know a video PTS
+        }
+    } else if (mpctx->hrseek_active) {
+        sync_pts = mpctx->hrseek_pts;
+    }
+    if (sync_pts == MP_NOPTS_VALUE) {
+        mpctx->audio_status = STATUS_FILLING;
+        return true; // syncing disabled
+    }
+
+    if (sync_to_video)
+        sync_pts += mpctx->delay - mpctx->audio_delay;
+
+    double ptsdiff = written_pts - sync_pts;
+    // Missing timestamp, or PTS reset, or just broken.
+    if (written_pts == MP_NOPTS_VALUE || fabs(ptsdiff) > 300) {
+        MP_WARN(mpctx, "Failed audio resync.\n");
+        mpctx->audio_status = STATUS_FILLING;
+        return true;
+    }
+
+    *skip = -ptsdiff * play_samplerate;
+    return true;
 }
 
 int fill_audio_out_buffers(struct MPContext *mpctx, double endpts)
 {
     struct MPOpts *opts = mpctx->opts;
-    struct ao *ao = mpctx->ao;
-    int playsize;
-    int playflags = 0;
-    bool audio_eof = false;
-    bool signal_eof = false;
-    bool partial_fill = false;
     struct dec_audio *d_audio = mpctx->d_audio;
-    struct mp_audio out_format;
-    ao_get_format(ao, &out_format);
-    // Can't adjust the start of audio with spdif pass-through.
-    bool modifiable_audio_format = !(out_format.format & AF_FORMAT_SPECIAL_MASK);
 
     assert(d_audio);
 
-    if (mpctx->paused)
-        playsize = 1; // just initialize things (audio pts at least)
-    else
-        playsize = ao_get_space(ao);
-    // Coming here with hrseek_active still set means audio-only
-    if (!mpctx->d_video || !mpctx->sync_audio_to_video)
-        mpctx->syncing_audio = false;
-    if (!opts->initial_audio_sync || !modifiable_audio_format) {
-        mpctx->syncing_audio = false;
-        mpctx->hrseek_active = false;
+    if (!d_audio->afilter || !mpctx->ao) {
+        // Probe the initial audio format. Returns AD_OK (and does nothing) if
+        // the format is already known.
+        int r = initial_audio_decode(mpctx->d_audio);
+        if (r == AD_WAIT)
+            return -1; // continue later when new data is available
+        if (r != AD_OK) {
+            MP_ERR(mpctx, "Error initializing audio.\n");
+            struct track *track = mpctx->current_track[0][STREAM_AUDIO];
+            mp_deselect_track(mpctx, track);
+            return -2;
+        }
+        reinit_audio_chain(mpctx);
+        return -1; // try again next iteration
    }
 
-    int res;
-    if (mpctx->syncing_audio || mpctx->hrseek_active)
-        res = audio_start_sync(mpctx, playsize);
-    else
-        res = audio_decode(d_audio, mpctx->ao_buffer, playsize);
+    // if paused, just initialize things (audio format & pts)
+    int playsize = 1;
+    if (!mpctx->paused)
+        playsize = ao_get_space(mpctx->ao);
 
-    if (res < 0) { // EOF, error or format change
-        if (res == AD_NEW_FMT) {
+    int skip = 0;
+    bool sync_known = get_sync_samples(mpctx, &skip);
+    if (skip > 0) {
+        playsize = MPMIN(skip + 1, MPMAX(playsize, 2500)); // buffer extra data
+    } else if (skip < 0) {
+        playsize = MPMAX(1, playsize + skip); // silence will be prepended
+    }
+
+    int status = AD_OK;
+    if (playsize > mp_audio_buffer_samples(mpctx->ao_buffer)) {
+        status = audio_decode(d_audio, mpctx->ao_buffer, playsize);
+        if (status == AD_WAIT)
+            return -1;
+        if (status == AD_NEW_FMT) {
             /* The format change isn't handled too gracefully. A more precise
              * implementation would require draining buffered old-format audio
              * while displaying video, then doing the output format switch.
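The *skip value computed by get_sync_samples() above is just the PTS gap converted to samples. A small standalone worked example (the numbers are made up; only the formula mirrors the diff):

// Worked example of the *skip computation (sketch, not mpv code).
#include <stdio.h>

int main(void)
{
    double written_pts = 10.200; // end PTS of the audio decoded so far
    double sync_pts    = 10.250; // target PTS audio should start at (video/hr-seek)
    double rate        = 48000;  // AO samplerate
    double speed       = 1.0;    // playback speed
    double play_samplerate = rate / speed;

    double ptsdiff = written_pts - sync_pts;   // -0.05 s: buffered audio ends early
    int skip = -ptsdiff * play_samplerate;     // +2400: drop 2400 samples
    printf("skip = %d samples\n", skip);

    // If written_pts were 10.300 instead, ptsdiff = +0.05 and skip = -2400,
    // i.e. fill_audio_out_buffers() would prepend 2400 samples of silence.
    return 0;
}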
@@ -416,16 +397,56 @@ int fill_audio_out_buffers(struct MPContext *mpctx, double endpts)
             if (mpctx->opts->gapless_audio < 1)
                 uninit_player(mpctx, INITIALIZED_AO);
             reinit_audio_chain(mpctx);
-            return -1;
-        } else if (res == AD_ASYNC_PLAY_DONE)
-            return 0;
-        else if (res == AD_EOF)
-            audio_eof = true;
+            mpctx->sleeptime = 0;
+            return -1; // retry on next iteration
+        }
     }
+
+    bool end_sync = status != AD_OK; // (on error/EOF, start playback immediately)
+    if (skip >= 0) {
+        int max = mp_audio_buffer_samples(mpctx->ao_buffer);
+        mp_audio_buffer_skip(mpctx->ao_buffer, MPMIN(skip, max));
+        // If something is left, we definitely reached the target time.
+        end_sync |= sync_known && skip < max;
+    } else if (skip < 0) {
+        if (-skip < 1000000) { // heuristic against making the buffer too large
+            mp_audio_buffer_prepend_silence(mpctx->ao_buffer, -skip);
+        } else {
+            MP_ERR(mpctx, "Audio starts too late: sync. failed.\n");
+            ao_reset(mpctx->ao);
+        }
+        end_sync = true;
+    }
+
+    if (mpctx->audio_status == STATUS_SYNCING) {
+        if (end_sync)
+            mpctx->audio_status = STATUS_FILLING;
+        mpctx->sleeptime = 0;
+        return -1; // continue on next iteration
+    }
+
+    assert(mpctx->audio_status >= STATUS_FILLING);
+
+    // Even if we're done decoding and syncing, let video start first - this is
+    // required, because sending audio to the AO already starts playback.
+    if (mpctx->audio_status == STATUS_FILLING && mpctx->sync_audio_to_video &&
+        mpctx->video_status <= STATUS_READY)
+    {
+        mpctx->audio_status = STATUS_READY;
+        return -1;
+    }
+
+    bool audio_eof = status == AD_EOF;
+    bool partial_fill = false;
+    int playflags = 0;
+
+    struct mp_audio out_format = {0};
+    ao_get_format(mpctx->ao, &out_format);
+    double play_samplerate = out_format.rate / opts->playback_speed;
+
     if (endpts != MP_NOPTS_VALUE) {
         double samples = (endpts - written_audio_pts(mpctx) - mpctx->audio_delay)
-                         * out_format.rate / opts->playback_speed;
+                         * play_samplerate;
         if (playsize > samples) {
             playsize = MPMAX(samples, 0);
             audio_eof = true;
@@ -437,18 +458,15 @@ int fill_audio_out_buffers(struct MPContext *mpctx, double endpts)
         playsize = mp_audio_buffer_samples(mpctx->ao_buffer);
         partial_fill = true;
     }
-    if (!playsize)
-        return partial_fill && audio_eof ? -2 : -partial_fill;
+
+    audio_eof &= partial_fill;
 
-    if (audio_eof && partial_fill) {
-        if (opts->gapless_audio) {
+    if (audio_eof) {
         // With gapless audio, delay this to ao_uninit. There must be only
         // 1 final chunk, and that is handled when calling ao_uninit().
-            signal_eof = true;
-        } else {
+        if (opts->gapless_audio)
             playflags |= AOPLAY_FINAL_CHUNK;
-        }
     }
 
     if (mpctx->paused)
         playsize = 0;
@@ -458,10 +476,18 @@ int fill_audio_out_buffers(struct MPContext *mpctx, double endpts)
     data.samples = MPMIN(data.samples, playsize);
     int played = write_to_ao(mpctx, &data, playflags, written_audio_pts(mpctx));
     assert(played >= 0 && played <= data.samples);
-
     mp_audio_buffer_skip(mpctx->ao_buffer, played);
 
-    return signal_eof ? -2 : -partial_fill;
+    mpctx->audio_status = STATUS_PLAYING;
+    if (audio_eof) {
+        mpctx->audio_status = STATUS_DRAINING;
+        // Wait until the AO has played all queued data. In the gapless case,
+        // we trigger EOF immediately, and let it play asynchronously.
+        if (ao_eof_reached(mpctx->ao) || opts->gapless_audio)
+            mpctx->audio_status = STATUS_EOF;
+    }
+
+    return 0;
 }
 
 // Drop data queued for output, or which the AO is currently outputting.
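The draining logic at the end of the hunk above (stay in STATUS_DRAINING until the AO reports its queue is played out, or immediately with gapless audio) can be pictured with a tiny standalone loop. This is a sketch, not mpv's playloop; the enum names and fake_ao_eof_reached() are invented here.

// Toy drain loop (assumptions only; not mpv code).
#include <stdbool.h>
#include <stdio.h>

enum status { PLAYING, DRAINING, END_OF_FILE };

static bool fake_ao_eof_reached(int ticks_left) { return ticks_left == 0; }

int main(void)
{
    bool gapless_audio = false;
    enum status st = DRAINING; // decoder hit EOF, final chunk already written
    int queued_ticks = 3;      // pretend the AO still has 3 ticks of audio queued

    while (st == DRAINING) {
        if (fake_ao_eof_reached(queued_ticks) || gapless_audio) {
            st = END_OF_FILE;
        } else {
            printf("draining, %d ticks of queued audio left\n", queued_ticks--);
        }
    }
    printf("audio status: EOF\n");
    return 0;
}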
@@ -149,6 +149,22 @@ enum {
     VD_WAIT = 3,    // no EOF, but no output; wait until wakeup
 };
 
+/* Note that playback can be paused, stopped, etc. at any time. While paused,
+ * playback restart is still active, because you want seeking to work even
+ * if paused.
+ * The main purpose of distinguishing these states is proper reinitialization
+ * of A/V sync.
+ */
+enum playback_status {
+    // code may compare status values numerically
+    STATUS_SYNCING,     // seeking for a position to resume
+    STATUS_FILLING,     // decoding more data (so you start with full buffers)
+    STATUS_READY,       // buffers full, playback can be started any time
+    STATUS_PLAYING,     // normal playback
+    STATUS_DRAINING,    // decoding has ended; still playing out queued buffers
+    STATUS_EOF,         // playback has ended, or is disabled
+};
+
 #define NUM_PTRACKS 2
 
 typedef struct MPContext {
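The enum in the hunk above defines an ordered progression, and the comment notes that code may compare the values numerically. A standalone sketch of that ordering; the enum values are copied from the diff, while the helper and the "ready to play" interpretation are invented for illustration.

// Sketch of the playback_status ordering (only the enum is from the diff).
#include <stdio.h>

enum playback_status {
    STATUS_SYNCING, STATUS_FILLING, STATUS_READY,
    STATUS_PLAYING, STATUS_DRAINING, STATUS_EOF,
};

static const char *name(enum playback_status s)
{
    static const char *names[] = {
        "syncing", "filling", "ready", "playing", "draining", "eof",
    };
    return names[s];
}

int main(void)
{
    // Example of a numeric comparison: the playloop starts actual playback
    // only once both streams have reached at least STATUS_READY.
    for (enum playback_status s = STATUS_SYNCING; s <= STATUS_EOF; s++)
        printf("%d: %-8s (>= READY: %s)\n", s, name(s),
               s >= STATUS_READY ? "yes" : "no");
    return 0;
}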
@@ -234,16 +250,11 @@ typedef struct MPContext {
 
     struct vo *video_out;
 
-    /* We're starting playback from scratch or after a seek. Show first
-     * video frame immediately and reinitialize sync. */
-    bool restart_playback;
+    enum playback_status video_status, audio_status;
+    bool restart_complete;
     /* Set if audio should be timed to start with video frame after seeking,
      * not set when e.g. playing cover art */
     bool sync_audio_to_video;
-    /* After playback restart (above) or audio stream change, adjust audio
-     * stream by cutting samples or adding silence at the beginning to make
-     * audio playback position match video position. */
-    bool syncing_audio;
     bool hrseek_active;
     bool hrseek_framedrop;
     double hrseek_pts;
@@ -465,6 +476,7 @@ void run_playloop(struct MPContext *mpctx);
 void idle_loop(struct MPContext *mpctx);
 void handle_force_window(struct MPContext *mpctx, bool reconfig);
 void add_frame_pts(struct MPContext *mpctx, double pts);
+void finish_playback_restart(struct MPContext *mpctx);
 
 // scripting.c
 struct mp_scripting {
@@ -84,6 +84,7 @@ void uninit_player(struct MPContext *mpctx, unsigned int mask)
         mixer_uninit_audio(mpctx->mixer);
         audio_uninit(mpctx->d_audio);
         mpctx->d_audio = NULL;
+        mpctx->audio_status = STATUS_EOF;
         reselect_demux_streams(mpctx);
     }
 
@@ -111,6 +112,7 @@ void uninit_player(struct MPContext *mpctx, unsigned int mask)
         if (mpctx->d_video)
             video_uninit(mpctx->d_video);
         mpctx->d_video = NULL;
+        mpctx->video_status = STATUS_EOF;
         mpctx->sync_audio_to_video = false;
         reselect_demux_streams(mpctx);
     }
@@ -1258,7 +1260,6 @@ goto_reopen_demuxer: ;
 
     mpctx->time_frame = 0;
     mpctx->drop_message_shown = 0;
-    mpctx->restart_playback = true;
     mpctx->video_pts = 0;
     mpctx->last_vo_pts = MP_NOPTS_VALUE;
     mpctx->last_frame_duration = 0;
@@ -1276,6 +1277,9 @@ goto_reopen_demuxer: ;
     mpctx->eof_reached = false;
     mpctx->last_chapter = -2;
     mpctx->seek = (struct seek_params){ 0 };
+    mpctx->video_status = mpctx->d_video ? STATUS_SYNCING : STATUS_EOF;
+    mpctx->audio_status = mpctx->d_audio ? STATUS_SYNCING : STATUS_EOF;
+    mpctx->restart_complete = false;
 
     // If there's a timeline force an absolute seek to initialize state
     double startpos = rel_time_to_abs(mpctx, opts->play_start);
@@ -146,14 +146,14 @@ void add_step_frame(struct MPContext *mpctx, int dir)
     }
 }
 
-static void seek_reset(struct MPContext *mpctx, bool reset_ao, bool reset_ac)
+static void seek_reset(struct MPContext *mpctx, bool reset_ao)
 {
     if (mpctx->d_video) {
         video_reset_decoding(mpctx->d_video);
         vo_seek_reset(mpctx->video_out);
     }
 
-    if (mpctx->d_audio && reset_ac) {
+    if (mpctx->d_audio) {
         audio_reset_decoding(mpctx->d_audio);
         if (reset_ao)
             clear_audio_output_buffers(mpctx);
@@ -168,7 +168,6 @@ static void seek_reset(struct MPContext *mpctx, bool reset_ao, bool reset_ac)
     mpctx->last_frame_duration = 0;
     mpctx->delay = 0;
     mpctx->time_frame = 0;
-    mpctx->restart_playback = true;
     mpctx->hrseek_active = false;
     mpctx->hrseek_framedrop = false;
     mpctx->total_avsync_change = 0;
@@ -176,6 +175,9 @@ static void seek_reset(struct MPContext *mpctx, bool reset_ao, bool reset_ac)
     mpctx->dropped_frames = 0;
     mpctx->playback_pts = MP_NOPTS_VALUE;
     mpctx->eof_reached = false;
+    mpctx->video_status = mpctx->d_video ? STATUS_SYNCING : STATUS_EOF;
+    mpctx->audio_status = mpctx->d_audio ? STATUS_SYNCING : STATUS_EOF;
+    mpctx->restart_complete = false;
 
 #if HAVE_ENCODING
     encode_lavc_discontinuity(mpctx->encode_lavc_ctx);
@@ -231,12 +233,9 @@ static int mp_seek(MPContext *mpctx, struct seek_params seek,
         seek.amount += get_current_time(mpctx);
     }
 
-    /* At least the liba52 decoder wants to read from the input stream
-     * during initialization, so reinit must be done after the demux_seek()
-     * call that clears possible stream EOF. */
-    bool need_reset = false;
     double demuxer_amount = seek.amount;
     if (mpctx->timeline) {
+        bool need_reset = false;
         demuxer_amount = timeline_set_from_time(mpctx, seek.amount,
                                                 &need_reset);
         if (demuxer_amount == -1) {
@@ -249,12 +248,13 @@ static int mp_seek(MPContext *mpctx, struct seek_params seek,
             }
             return -1;
         }
-    }
         if (need_reset) {
             reinit_video_chain(mpctx);
+            reinit_audio_chain(mpctx);
             reinit_subs(mpctx, 0);
             reinit_subs(mpctx, 1);
         }
+    }
 
     int demuxer_style = 0;
     switch (seek.type) {
@@ -290,11 +290,7 @@ static int mp_seek(MPContext *mpctx, struct seek_params seek,
         }
     }
 
-    if (need_reset)
-        reinit_audio_chain(mpctx);
-    /* If we just reinitialized audio it doesn't need to be reset,
-     * and resetting could lose audio some decoders produce during init. */
-    seek_reset(mpctx, !timeline_fallthrough, !need_reset);
+    seek_reset(mpctx, !timeline_fallthrough);
 
     if (timeline_fallthrough) {
         // Important if video reinit happens.
@@ -375,7 +371,7 @@ void execute_queued_seek(struct MPContext *mpctx)
         /* If the user seeks continuously (keeps arrow key down)
          * try to finish showing a frame from one location before doing
          * another seek (which could lead to unchanging display). */
-        if (!mpctx->seek.immediate && mpctx->restart_playback &&
+        if (!mpctx->seek.immediate && !mpctx->restart_complete &&
             mp_time_sec() - mpctx->start_timestamp < 0.3)
             return;
         mp_seek(mpctx, mpctx->seek, false);
@@ -547,7 +543,8 @@ bool mp_seek_chapter(struct MPContext *mpctx, int chapter)
 
 static void update_avsync(struct MPContext *mpctx)
 {
-    if (!mpctx->d_audio || !mpctx->d_video)
+    if (mpctx->audio_status != STATUS_PLAYING ||
+        mpctx->video_status != STATUS_PLAYING)
         return;
 
     double a_pos = playing_audio_pts(mpctx);
@@ -578,7 +575,7 @@ static void adjust_sync(struct MPContext *mpctx, double frame_time)
 {
     struct MPOpts *opts = mpctx->opts;
 
-    if (!mpctx->d_audio || mpctx->syncing_audio)
+    if (mpctx->audio_status != STATUS_PLAYING)
         return;
 
     double a_pts = written_audio_pts(mpctx) - mpctx->delay;
@@ -790,7 +787,7 @@ static void handle_sstep(struct MPContext *mpctx)
 {
     struct MPOpts *opts = mpctx->opts;
     if (opts->step_sec > 0 && !mpctx->stop_play && !mpctx->paused &&
-        !mpctx->restart_playback)
+        mpctx->restart_complete)
     {
         set_osd_function(mpctx, OSD_FFW);
         queue_seek(mpctx, MPSEEK_RELATIVE, opts->step_sec, 0, true);
@@ -900,11 +897,8 @@ static double get_wakeup_period(struct MPContext *mpctx)
 void run_playloop(struct MPContext *mpctx)
 {
     struct MPOpts *opts = mpctx->opts;
-    bool full_audio_buffers = false;
-    bool audio_left = false, video_left = false;
     double endpts = get_play_end_pts(mpctx);
     bool end_is_chapter = false;
-    bool was_restart = mpctx->restart_playback;
     bool new_frame_shown = false;
 
 #if HAVE_ENCODING
@@ -930,12 +924,8 @@ void run_playloop(struct MPContext *mpctx)
         endpts = end;
     }
 
-    if (mpctx->d_audio && !mpctx->restart_playback && !ao_untimed(mpctx->ao)) {
-        int status = fill_audio_out_buffers(mpctx, endpts);
-        full_audio_buffers = status >= 0;
-        // Not at audio stream EOF yet
-        audio_left = status > -2;
-    }
+    if (mpctx->d_audio)
+        fill_audio_out_buffers(mpctx, endpts);
 
     if (mpctx->video_out) {
         vo_check_events(mpctx->video_out);
@@ -957,8 +947,11 @@ void run_playloop(struct MPContext *mpctx)
 
         double frame_time = 0;
         int r = update_video(mpctx, endpts, !still_playing, &frame_time);
+        MP_TRACE(mpctx, "update_video: %d (still_playing=%d)\n", r, still_playing);
+
+        if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.
+            break;
 
-        MP_TRACE(mpctx, "update_video: %d\n", r);
         if (r < 0) {
             MP_FATAL(mpctx, "Could not initialize video chain.\n");
             int uninit = INITIALIZED_VCODEC;
@@ -979,49 +972,53 @@ void run_playloop(struct MPContext *mpctx)
                 mpctx->playing_last_frame = true;
                 MP_VERBOSE(mpctx, "showing last frame\n");
             }
-            if (mpctx->playing_last_frame) {
-                r = VD_PROGRESS; // don't stop playback yet
-                MP_TRACE(mpctx, "still showing last frame\n");
-            }
         }
 
-        video_left = r > 0;
-
-        if (r == VD_WAIT)
-            break;
-
-        if (mpctx->restart_playback)
-            mpctx->sleeptime = 0;
-
-        if (r == VD_NEW_FRAME)
+        if (r == VD_NEW_FRAME) {
             MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);
 
-        if (r == VD_NEW_FRAME && !mpctx->restart_playback) {
-            mpctx->time_frame += frame_time / opts->playback_speed;
-            adjust_sync(mpctx, frame_time);
-        }
-
-        if (!video_left) {
-            mpctx->delay = 0;
-            mpctx->last_av_difference = 0;
-        }
-
-        if (!video_left || (mpctx->paused && !mpctx->restart_playback)) {
-            if (mpctx->paused)
-                video_left |= vo->hasframe;
-            break;
-        }
-
-        if (r != VD_NEW_FRAME && !mpctx->playing_last_frame) {
-            mpctx->sleeptime = 0;
-            break;
-        }
-
-        mpctx->time_frame -= get_relative_time(mpctx);
-        double audio_pts = playing_audio_pts(mpctx);
-        if (!mpctx->sync_audio_to_video) {
-            mpctx->time_frame = 0;
-        } else if (full_audio_buffers && !mpctx->restart_playback) {
+            if (mpctx->video_status > STATUS_PLAYING)
+                mpctx->video_status = STATUS_PLAYING;
+
+            if (mpctx->video_status >= STATUS_READY) {
+                mpctx->time_frame += frame_time / opts->playback_speed;
+                adjust_sync(mpctx, frame_time);
+            }
+        } else if (r == VD_EOF && mpctx->playing_last_frame) {
+            // Let video timing code continue displaying.
+            mpctx->video_status = STATUS_DRAINING;
+            MP_VERBOSE(mpctx, "still showing last frame\n");
+        } else if (r <= 0) {
+            // EOF or error
+            mpctx->delay = 0;
+            mpctx->last_av_difference = 0;
+            mpctx->video_status = STATUS_EOF;
+            if (mpctx->paused && vo->hasframe)
+                mpctx->video_status = STATUS_DRAINING;
+            MP_VERBOSE(mpctx, "video EOF\n");
+        } else {
+            if (mpctx->video_status > STATUS_PLAYING)
+                mpctx->video_status = STATUS_PLAYING;
+
+            // Decode more in next iteration.
+            mpctx->sleeptime = 0;
+            MP_TRACE(mpctx, "filtering more video\n");
+        }
+
+        // Actual playback starts when both audio and video are ready.
+        if (mpctx->video_status == STATUS_READY)
+            break;
+
+        if (mpctx->paused && mpctx->video_status >= STATUS_READY)
+            break;
+
+        mpctx->time_frame -= get_relative_time(mpctx);
+        double audio_pts = playing_audio_pts(mpctx);
+        if (!mpctx->sync_audio_to_video || mpctx->video_status < STATUS_READY) {
+            mpctx->time_frame = 0;
+        } else if (mpctx->audio_status == STATUS_PLAYING &&
+                   mpctx->video_status == STATUS_PLAYING)
+        {
             double buffered_audio = ao_get_delay(mpctx->ao);
             MP_TRACE(mpctx, "audio delay=%f\n", buffered_audio);
 
@@ -1061,7 +1058,7 @@ void run_playloop(struct MPContext *mpctx)
             mpctx->sleeptime = 0;
             mpctx->playing_last_frame = false;
 
-            // last frame case (don't set video_left - consider format changes)
+            // last frame case
             if (r != VD_NEW_FRAME)
                 break;
 
@@ -1115,7 +1112,7 @@ void run_playloop(struct MPContext *mpctx)
             duration = diff * 1e6;
             mpctx->last_frame_duration = diff;
         }
-        if (mpctx->restart_playback)
+        if (mpctx->video_status != STATUS_PLAYING)
             duration = -1;
 
         MP_STATS(mpctx, "start flip");
@@ -1133,65 +1130,57 @@ void run_playloop(struct MPContext *mpctx)
             mpctx->time_frame -= get_relative_time(mpctx);
         }
         mpctx->shown_vframes++;
-        if (mpctx->restart_playback) {
-            if (mpctx->sync_audio_to_video) {
-                mpctx->syncing_audio = true;
-                if (mpctx->d_audio)
-                    fill_audio_out_buffers(mpctx, endpts);
-                mpctx->restart_playback = false;
-                mp_notify(mpctx, MPV_EVENT_PLAYBACK_RESTART, NULL);
-            }
-            mpctx->time_frame = 0;
-            get_relative_time(mpctx);
-        }
+        if (mpctx->video_status < STATUS_PLAYING)
+            mpctx->video_status = STATUS_READY;
         update_avsync(mpctx);
         screenshot_flip(mpctx);
         new_frame_shown = true;
 
         mp_notify(mpctx, MPV_EVENT_TICK, NULL);
 
+        if (!mpctx->sync_audio_to_video)
+            mpctx->video_status = STATUS_EOF;
+
         break;
     } // video
 
-    if (!video_left || mpctx->paused) {
+    if (mpctx->video_status == STATUS_EOF || mpctx->paused) {
         if (mp_time_sec() - mpctx->last_idle_tick > 0.5) {
             mpctx->last_idle_tick = mp_time_sec();
             mp_notify(mpctx, MPV_EVENT_TICK, NULL);
         }
     }
 
-    video_left &= mpctx->sync_audio_to_video; // force no-video semantics
-
-    if (mpctx->d_audio && (mpctx->restart_playback ? !video_left :
-                           ao_untimed(mpctx->ao) && (mpctx->delay <= 0 ||
-                                                     !video_left)))
+    // We always make sure audio and video buffers are filled before actually
+    // starting playback. This code handles starting them at the same time.
+    if (mpctx->audio_status >= STATUS_READY &&
+        mpctx->video_status >= STATUS_READY)
     {
-        int status = fill_audio_out_buffers(mpctx, endpts);
-        // Not at audio stream EOF yet
-        audio_left = status > -2;
-    }
-    if (mpctx->d_audio) {
-        /* When all audio has been written to output driver, stay in the
-         * main loop handling commands until it has been mostly consumed,
-         * except in the gapless case, where the next file will be started
-         * while audio from the current one still remains to be played.
-         */
-        audio_left |= !ao_eof_reached(mpctx->ao) && !opts->gapless_audio;
+        if (mpctx->video_status == STATUS_READY) {
+            mpctx->video_status = STATUS_PLAYING;
+            get_relative_time(mpctx);
+            mpctx->sleeptime = 0;
+            new_frame_shown = true;
+        }
+        if (mpctx->audio_status == STATUS_READY)
+            fill_audio_out_buffers(mpctx, endpts); // actually play prepared buffer
+        if (!mpctx->restart_complete) {
+            mpctx->hrseek_active = false;
+            mp_notify(mpctx, MPV_EVENT_PLAYBACK_RESTART, NULL);
+            mpctx->restart_complete = true;
+        }
     }
-    if (!video_left)
-        mpctx->restart_playback = false;
 
-    update_osd_msg(mpctx);
-
-    if (!video_left && (!mpctx->paused || was_restart)) {
+    if (mpctx->video_status == STATUS_EOF &&
+        mpctx->audio_status >= STATUS_PLAYING)
+    {
         double a_pos = 0;
         if (mpctx->d_audio)
             a_pos = playing_audio_pts(mpctx);
         mpctx->playback_pts = a_pos;
-        if (was_restart)
-            mp_notify(mpctx, MPV_EVENT_PLAYBACK_RESTART, NULL);
     }
 
+    update_osd_msg(mpctx);
     update_subtitles(mpctx);
 
     /* It's possible for the user to simultaneously switch both audio
@@ -1201,12 +1190,14 @@ void run_playloop(struct MPContext *mpctx)
      * We want this check to trigger if we seeked to this position,
      * but not if we paused at it with audio possibly still buffered in
      * the AO. There's currently no working way to check buffered audio
-     * inside AO while paused. Thus the "was_restart" check below, which
+     * inside AO while paused. Thus the "was_audio_restart" check below, which
      * should trigger after seek only, when we know there's no audio
      * buffered.
      */
-    if ((mpctx->d_audio || mpctx->d_video) && !audio_left && !video_left
-        && (!mpctx->paused || was_restart)) {
+    if ((mpctx->d_audio || mpctx->d_video) && !mpctx->paused &&
+        mpctx->audio_status == STATUS_EOF &&
+        mpctx->video_status == STATUS_EOF)
+    {
         if (end_is_chapter) {
             mp_seek(mpctx, (struct seek_params){
                 .type = MPSEEK_ABSOLUTE,
@@ -1218,11 +1209,11 @@ void run_playloop(struct MPContext *mpctx)
 
     mp_handle_nav(mpctx);
 
-    if (!mpctx->stop_play && !mpctx->restart_playback) {
+    if (!mpctx->stop_play && mpctx->restart_complete) {
 
         // If no more video is available, one frame means one playloop iteration.
         // Otherwise, one frame means one video frame.
-        if (!video_left)
+        if (mpctx->video_status == STATUS_EOF)
             new_frame_shown = true;
 
         if (opts->playing_msg && !mpctx->playing_msg_shown && new_frame_shown) {
@@ -223,7 +223,7 @@ int reinit_video_chain(struct MPContext *mpctx)
         vo_control(mpctx->video_out, mpctx->paused ? VOCTRL_PAUSE
                                                    : VOCTRL_RESUME, NULL);
 
-    mpctx->restart_playback = true;
+    mpctx->video_status = STATUS_SYNCING;
     mpctx->sync_audio_to_video = !sh->attached_picture;
     mpctx->delay = 0;
     mpctx->video_next_pts = MP_NOPTS_VALUE;
@@ -231,6 +231,10 @@ int reinit_video_chain(struct MPContext *mpctx)
     mpctx->last_frame_duration = 0;
     mpctx->vo_pts_history_seek_ts++;
 
+    // If we switch on video again, ensure audio position matches up.
+    if (mpctx->d_audio)
+        mpctx->audio_status = STATUS_SYNCING;
+
     vo_seek_reset(mpctx->video_out);
     reset_subtitles(mpctx, 0);
     reset_subtitles(mpctx, 1);
@@ -277,7 +281,7 @@ static int check_framedrop(struct MPContext *mpctx, double frame_time)
     struct track *t_audio = mpctx->current_track[0][STREAM_AUDIO];
     struct sh_stream *sh_audio = t_audio ? t_audio->stream : NULL;
     // check for frame-drop:
-    if (mpctx->d_audio && !ao_untimed(mpctx->ao) && sh_audio &&
+    if (mpctx->d_audio && mpctx->ao && !ao_untimed(mpctx->ao) && sh_audio &&
         !demux_stream_eof(sh_audio))
     {
         float delay = opts->playback_speed * ao_get_delay(mpctx->ao);
@@ -288,7 +292,7 @@ static int check_framedrop(struct MPContext *mpctx, double frame_time)
         // we should avoid dropping too many frames in sequence unless we
         // are too late. and we allow 100ms A-V delay here:
         if (d < -mpctx->dropped_frames * frame_time - 0.100 && !mpctx->paused
-            && !mpctx->restart_playback) {
+            && mpctx->video_status == STATUS_PLAYING) {
             mpctx->drop_frame_cnt++;
             mpctx->dropped_frames++;
             return mpctx->opts->frame_dropping;
@@ -321,7 +325,8 @@ static int decode_image(struct MPContext *mpctx)
     {
         mpctx->hrseek_framedrop = false;
     }
-    int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
+    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
+    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                          1 : check_framedrop(mpctx, -1);
     d_video->waiting_decoded_mpi =
         video_decode(d_video, pkt, framedrop_type);
@@ -449,7 +454,8 @@ static int video_output_image(struct MPContext *mpctx, double endpts,
         add_frame_pts(mpctx, pts);
 
         bool drop = false;
-        if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005)
+        bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
+        if (hrseek && pts < mpctx->hrseek_pts - .005)
             drop = true;
         if (endpts != MP_NOPTS_VALUE && pts >= endpts) {
             drop = true;
@@ -537,7 +543,6 @@ int update_video(struct MPContext *mpctx, double endpts, bool reconfig_ok,
     }
 
     double pts = vo_get_next_pts(video_out, 0);
-    mpctx->hrseek_active = false;
     double last_pts = mpctx->video_next_pts;
     if (last_pts == MP_NOPTS_VALUE)
         last_pts = pts;