audio: refactor how data is passed to AO

This replaces the two buffers (ao_chain.ao_buffer in the core, and
buffer_state.buffers in the AO) with a single queue. Instead of having a
byte-based buffer, the queue is simply a list of audio frames, as output
by the decoder. This should make dataflow simpler and reduce copying.
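
In rough terms the new dataflow looks like this (a condensed, non-compilable
sketch that only uses the internal calls visible in the diff below; variable
names are illustrative):

    // Core side: the playloop's dummy "ao" filter writes decoded frames into
    // the AO's queue through an async-queue filter (see ao_chain_set_ao()).
    struct mp_async_queue *queue = ao_get_queue(ao);
    struct mp_filter *writer =
        mp_async_queue_create_filter(ao_filter, MP_PIN_IN, queue);

    // In the filter's process callback: take a frame from f_output_chain and
    // push it, whole, into the queue (no byte-wise copy into a ring buffer).
    struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
    if (frame.type == MP_FRAME_AUDIO)
        mp_pin_in_write(writer->pins[0], frame);

    // AO side (audio/out/buffer.c): frames come back out of the queue and are
    // consumed sample-wise as the device asks for data.
    struct mp_frame fr = mp_pin_out_read(p->input->pins[0]);
    if (fr.type == MP_FRAME_AUDIO) {
        struct mp_aframe *af = fr.data;   // one decoder/filter output frame
        // ...copy/consume af's samples; keep the remainder in p->pending...
    }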

It also attempts to simplify fill_audio_out_buffers(), the function I
always hated most, because it's full of subtle and buggy logic.

Unfortunately, I got assaulted by corner cases, dumb features (attempt
at seamless looping, really?), and other crap, so it got pretty
complicated again. fill_audio_out_buffers() is still full of subtle and
buggy logic. Maybe it got worse. On the other hand, maybe there really
is some progress. Who knows.

Originally, the data flow part was meant to be in f_output_chain, but
due to tricky interactions with the playloop code, it's now in the dummy
filter in audio.c.
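
For orientation, the dummy filter is wired up roughly like this (condensed
from the player/audio.c hunk further down):

    static const struct mp_filter_info ao_filter = {
        .name = "ao",
        .process = ao_process, // moves frames from the output chain to the AO
    };

    ao_c->ao_filter = mp_filter_create(mpctx->filter_root, &ao_filter);
    ao_c->ao_filter->priv = ao_c;
    mp_filter_add_pin(ao_c->ao_filter, MP_PIN_IN, "in");
    // Its input pin is fed by f_output_chain; ao_process() decides when
    // frames may be forwarded (start pts sync, untimed-AO throttling,
    // end-pts clipping).
    mp_pin_connect(ao_c->ao_filter->pins[0], ao_c->filter->f->pins[1]);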

At least this improves the way the audio PTS is passed to the encoder in
encoding mode. Now it attempts to pass frames directly, along with the
pts, which should minimize timestamp problems. But to be honest, encoder
mode is one big kludge that shouldn't exist in this way.
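
Concretely, with ao_driver.write_frames set, the encoding AO's write()
receives whole frames, so the pts travels with the frame instead of being
reconstructed from a sample counter (sketch based on the ao_lavc.c changes
below; it glosses over the discontinuity/offset handling):

    // write() gets a single mp_aframe instead of raw sample planes.
    struct mp_aframe *af = *(struct mp_aframe **)data;
    double outpts = mp_aframe_get_pts(af);  // pts carried by the frame itself

    AVFrame *frame = mp_aframe_to_avframe(af);
    frame->pts = rint(outpts * av_q2d(av_inv_q(encoder->time_base)));
    encoder_encode(ac->enc, frame);
    av_frame_free(&frame);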

This commit should be considered pre-alpha code. There are lots of bugs
still hiding.
wm4 2020-08-28 20:23:54 +02:00
parent bb1f821078
commit b74c09efbf
12 changed files with 677 additions and 807 deletions

View File

@ -45,7 +45,6 @@ enum {
AO_EVENT_RELOAD = 1,
AO_EVENT_HOTPLUG = 2,
AO_EVENT_INITIAL_UNBLOCK = 4,
AO_EVENT_UNDERRUN = 8,
};
enum {
@ -98,16 +97,16 @@ void ao_get_format(struct ao *ao,
const char *ao_get_name(struct ao *ao);
const char *ao_get_description(struct ao *ao);
bool ao_untimed(struct ao *ao);
int ao_play(struct ao *ao, void **data, int samples, int flags);
int ao_control(struct ao *ao, enum aocontrol cmd, void *arg);
void ao_set_gain(struct ao *ao, float gain);
double ao_get_delay(struct ao *ao);
int ao_get_space(struct ao *ao);
void ao_reset(struct ao *ao);
void ao_pause(struct ao *ao);
void ao_resume(struct ao *ao);
void ao_start(struct ao *ao);
void ao_set_paused(struct ao *ao, bool paused);
void ao_drain(struct ao *ao);
bool ao_eof_reached(struct ao *ao);
bool ao_is_playing(struct ao *ao);
struct mp_async_queue;
struct mp_async_queue *ao_get_queue(struct ao *ao);
int ao_query_and_reset_events(struct ao *ao, int events);
int ao_add_events(struct ao *ao, int events);
void ao_unblock(struct ao *ao);

View File

@ -30,8 +30,11 @@
#include "config.h"
#include "options/options.h"
#include "common/common.h"
#include "audio/aframe.h"
#include "audio/format.h"
#include "audio/fmt-conversion.h"
#include "filters/filter_internal.h"
#include "filters/f_utils.h"
#include "mpv_talloc.h"
#include "ao.h"
#include "internal.h"
@ -44,20 +47,19 @@ struct priv {
int pcmhack;
int aframesize;
int aframecount;
int64_t savepts;
int framecount;
int64_t lastpts;
int sample_size;
const void *sample_padding;
double expected_next_pts;
struct mp_filter *filter_root;
struct mp_filter *fix_frame_size;
AVRational worst_time_base;
bool shutdown;
};
static void encode(struct ao *ao, double apts, void **data);
static void read_frames(struct ao *ao);
static bool supports_format(const AVCodec *codec, int format)
{
@ -151,7 +153,6 @@ static int init(struct ao *ao)
// but at least one!
ac->framecount = MPMAX(ac->framecount, 1);
ac->savepts = AV_NOPTS_VALUE;
ac->lastpts = AV_NOPTS_VALUE;
ao->untimed = true;
@ -159,8 +160,10 @@ static int init(struct ao *ao)
ao->device_buffer = ac->aframesize * ac->framecount;
ao->period_size = ao->device_buffer;
if (ao->channels.num > AV_NUM_DATA_POINTERS)
goto fail;
ac->filter_root = mp_filter_create_root(ao->global);
ac->fix_frame_size = mp_fixed_aframe_size_create(ac->filter_root,
ac->aframesize, true);
MP_HANDLE_OOM(ac->fix_frame_size);
return 0;
@ -185,103 +188,81 @@ static void uninit(struct ao *ao)
pthread_mutex_unlock(&ectx->lock);
outpts += encoder_get_offset(ac->enc);
encode(ao, outpts, NULL);
if (!mp_pin_in_write(ac->fix_frame_size->pins[0], MP_EOF_FRAME))
MP_WARN(ao, "could not flush last frame\n");
read_frames(ao);
encoder_encode(ac->enc, NULL);
}
talloc_free(ac->filter_root);
}
// must get exactly ac->aframesize amount of data
static void encode(struct ao *ao, double apts, void **data)
static void encode(struct ao *ao, struct mp_aframe *af)
{
struct priv *ac = ao->priv;
struct encode_lavc_context *ectx = ao->encode_lavc_ctx;
AVCodecContext *encoder = ac->enc->encoder;
double realapts = ac->aframecount * (double) ac->aframesize /
ao->samplerate;
double outpts = mp_aframe_get_pts(af);
ac->aframecount++;
AVFrame *frame = mp_aframe_to_avframe(af);
if (!frame)
abort();
pthread_mutex_lock(&ectx->lock);
if (data)
ectx->audio_pts_offset = realapts - apts;
pthread_mutex_unlock(&ectx->lock);
frame->pts = rint(outpts * av_q2d(av_inv_q(encoder->time_base)));
if(data) {
AVFrame *frame = av_frame_alloc();
frame->format = af_to_avformat(ao->format);
frame->nb_samples = ac->aframesize;
frame->channels = encoder->channels;
frame->channel_layout = encoder->channel_layout;
size_t num_planes = af_fmt_is_planar(ao->format) ? ao->channels.num : 1;
assert(num_planes <= AV_NUM_DATA_POINTERS);
for (int n = 0; n < num_planes; n++)
frame->extended_data[n] = data[n];
frame->linesize[0] = frame->nb_samples * ao->sstride;
frame->pts = rint(apts * av_q2d(av_inv_q(encoder->time_base)));
int64_t frame_pts = av_rescale_q(frame->pts, encoder->time_base,
ac->worst_time_base);
while (ac->lastpts != AV_NOPTS_VALUE && frame_pts <= ac->lastpts) {
// whatever the fuck this code does?
MP_WARN(ao, "audio frame pts went backwards (%d <- %d), autofixed\n",
(int)frame->pts, (int)ac->lastpts);
frame_pts = ac->lastpts + 1;
ac->lastpts = frame_pts;
frame->pts = av_rescale_q(frame_pts, ac->worst_time_base,
encoder->time_base);
frame_pts = av_rescale_q(frame->pts, encoder->time_base,
int64_t frame_pts = av_rescale_q(frame->pts, encoder->time_base,
ac->worst_time_base);
}
if (ac->lastpts != AV_NOPTS_VALUE && frame_pts <= ac->lastpts) {
// whatever the fuck this code does?
MP_WARN(ao, "audio frame pts went backwards (%d <- %d), autofixed\n",
(int)frame->pts, (int)ac->lastpts);
frame_pts = ac->lastpts + 1;
ac->lastpts = frame_pts;
frame->pts = av_rescale_q(frame_pts, ac->worst_time_base,
encoder->time_base);
frame_pts = av_rescale_q(frame->pts, encoder->time_base,
ac->worst_time_base);
}
ac->lastpts = frame_pts;
frame->quality = encoder->global_quality;
encoder_encode(ac->enc, frame);
av_frame_free(&frame);
} else {
encoder_encode(ac->enc, NULL);
frame->quality = encoder->global_quality;
encoder_encode(ac->enc, frame);
av_frame_free(&frame);
}
static void read_frames(struct ao *ao)
{
struct priv *ac = ao->priv;
while (1) {
struct mp_frame fr = mp_pin_out_read(ac->fix_frame_size->pins[1]);
if (!fr.type)
break;
if (fr.type != MP_FRAME_AUDIO)
continue;
struct mp_aframe *af = fr.data;
encode(ao, af);
mp_frame_unref(&fr);
}
}
// Note: currently relies on samples aligned to period sizes - will not work
// in the future.
static bool audio_write(struct ao *ao, void **data, int samples)
{
struct priv *ac = ao->priv;
struct encoder_context *enc = ac->enc;
struct encode_lavc_context *ectx = ao->encode_lavc_ctx;
int bufpos = 0;
// See ao_driver.write_frames.
struct mp_aframe *af = mp_aframe_new_ref(*(struct mp_aframe **)data);
double nextpts;
int orig_samples = samples;
double pts = mp_aframe_get_pts(af);
double outpts = pts;
// for ectx PTS fields
pthread_mutex_lock(&ectx->lock);
double pts = ectx->last_audio_in_pts;
pts += ectx->samples_since_last_pts / (double)ao->samplerate;
size_t num_planes = af_fmt_is_planar(ao->format) ? ao->channels.num : 1;
void *tempdata = NULL;
void *padded[MP_NUM_CHANNELS];
if (samples % ac->aframesize) {
tempdata = talloc_new(NULL);
size_t bytelen = samples * ao->sstride;
size_t extralen = (ac->aframesize - 1) * ao->sstride;
for (int n = 0; n < num_planes; n++) {
padded[n] = talloc_size(tempdata, bytelen + extralen);
memcpy(padded[n], data[n], bytelen);
af_fill_silence((char *)padded[n] + bytelen, extralen, ao->format);
}
data = padded;
samples = (bytelen + extralen) / ao->sstride;
MP_VERBOSE(ao, "padding final frame with silence\n");
}
double outpts = pts;
if (!enc->options->rawts) {
if (!ectx->options->rawts) {
// Fix and apply the discontinuity pts offset.
nextpts = pts;
if (ectx->discontinuity_pts_offset == MP_NOPTS_VALUE) {
@ -298,44 +279,36 @@ static bool audio_write(struct ao *ao, void **data, int samples)
outpts = pts + ectx->discontinuity_pts_offset;
}
pthread_mutex_unlock(&ectx->lock);
// Shift pts by the pts offset first.
outpts += encoder_get_offset(enc);
while (samples - bufpos >= ac->aframesize) {
void *start[MP_NUM_CHANNELS] = {0};
for (int n = 0; n < num_planes; n++)
start[n] = (char *)data[n] + bufpos * ao->sstride;
encode(ao, outpts + bufpos / (double) ao->samplerate, start);
bufpos += ac->aframesize;
}
outpts += encoder_get_offset(ac->enc);
// Calculate expected pts of next audio frame (input side).
ac->expected_next_pts = pts + bufpos / (double) ao->samplerate;
pthread_mutex_lock(&ectx->lock);
ac->expected_next_pts = pts + mp_aframe_get_size(af) / (double) ao->samplerate;
// Set next allowed input pts value (input side).
if (!enc->options->rawts) {
if (!ectx->options->rawts) {
nextpts = ac->expected_next_pts + ectx->discontinuity_pts_offset;
if (nextpts > ectx->next_in_pts)
ectx->next_in_pts = nextpts;
}
talloc_free(tempdata);
int taken = MPMIN(bufpos, orig_samples);
ectx->samples_since_last_pts += taken;
pthread_mutex_unlock(&ectx->lock);
mp_aframe_set_pts(af, outpts);
// Can't push in a frame if it doesn't want to output one.
mp_pin_out_request_data(ac->fix_frame_size->pins[1]);
if (!mp_pin_in_write(ac->fix_frame_size->pins[0],
MAKE_FRAME(MP_FRAME_AUDIO, af)))
return false; // shouldn't happen™
read_frames(ao);
return true;
}
static void get_state(struct ao *ao, struct mp_pcm_state *state)
{
state->free_samples = ao->device_buffer;
state->free_samples = 1;
state->queued_samples = 0;
state->delay = 0;
}
@ -359,6 +332,7 @@ const struct ao_driver audio_out_lavc = {
.description = "audio encoding using libavcodec",
.name = "lavc",
.initially_blocked = true,
.write_frames = true,
.priv_size = sizeof(struct priv),
.init = init,
.uninit = uninit,

View File

@ -31,13 +31,11 @@
#include "common/msg.h"
#include "common/common.h"
#include "input/input.h"
#include "filters/f_async_queue.h"
#include "filters/filter_internal.h"
#include "osdep/io.h"
#include "osdep/timer.h"
#include "osdep/threads.h"
#include "osdep/atomic.h"
#include "misc/ring.h"
struct buffer_state {
// Buffer and AO
@ -51,27 +49,26 @@ struct buffer_state {
// Access from AO driver's thread only.
char *convert_buffer;
// Immutable.
struct mp_async_queue *queue;
// --- protected by lock
struct mp_ring *buffers[MP_NUM_CHANNELS];
struct mp_filter *filter_root;
struct mp_filter *input; // connected to queue
struct mp_aframe *pending; // last, not fully consumed output
bool streaming; // AO streaming active
bool playing; // logically playing audio from buffer
bool paused; // logically paused; implies playing=true
bool final_chunk; // if buffer contains EOF
bool paused; // logically paused
int64_t end_time_us; // absolute output time of last played sample
int64_t underflow; // number of samples missing since last check
bool initial_unblocked;
// "Push" AOs only (AOs with driver->write).
bool still_playing;
bool hw_paused; // driver->set_pause() was used successfully
bool recover_pause; // non-hw_paused: needs to recover delay
bool draining;
bool ao_wait_low_buffer;
struct mp_pcm_state prepause_state;
pthread_t thread; // thread shoveling data to AO
bool thread_valid; // thread is running
@ -98,7 +95,7 @@ static void get_dev_state(struct ao *ao, struct mp_pcm_state *state)
{
struct buffer_state *p = ao->buffer_state;
if (p->paused) {
if (p->paused && p->playing) {
*state = p->prepause_state;
return;
}
@ -111,84 +108,66 @@ static void get_dev_state(struct ao *ao, struct mp_pcm_state *state)
ao->driver->get_state(ao, state);
}
static int unlocked_get_space(struct ao *ao)
struct mp_async_queue *ao_get_queue(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
int space = mp_ring_available(p->buffers[0]) / ao->sstride;
// The following code attempts to keep the total buffered audio at
// ao->buffer in order to improve latency.
if (ao->driver->write) {
struct mp_pcm_state state;
get_dev_state(ao, &state);
int align = af_format_sample_alignment(ao->format);
int device_space = MPMAX(state.free_samples, 0);
int device_buffered = ao->device_buffer - device_space;
int soft_buffered = mp_ring_size(p->buffers[0]) / ao->sstride - space;
// The extra margin helps avoiding too many wakeups if the AO is fully
// byte based and doesn't do proper chunked processing.
int min_buffer = ao->buffer + 64;
int missing = min_buffer - device_buffered - soft_buffered;
missing = (missing + align - 1) / align * align;
// But always keep the device's buffer filled as much as we can.
int device_missing = device_space - soft_buffered;
missing = MPMAX(missing, device_missing);
space = MPMIN(space, missing);
space = MPMAX(0, space);
}
return space;
return p->queue;
}
int ao_get_space(struct ao *ao)
// Special behavior with data==NULL: caller uses p->pending.
static int read_buffer(struct ao *ao, void **data, int samples, bool *eof)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
int space = unlocked_get_space(ao);
pthread_mutex_unlock(&p->lock);
return space;
}
int pos = 0;
*eof = false;
int ao_play(struct ao *ao, void **data, int samples, int flags)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
int write_samples = mp_ring_available(p->buffers[0]) / ao->sstride;
write_samples = MPMIN(write_samples, samples);
int write_bytes = write_samples * ao->sstride;
for (int n = 0; n < ao->num_planes; n++) {
int r = mp_ring_write(p->buffers[n], data[n], write_bytes);
assert(r == write_bytes);
}
p->paused = false;
p->final_chunk = write_samples == samples && (flags & PLAYER_FINAL_CHUNK);
if (p->underflow)
MP_DBG(ao, "Audio underrun by %lld samples.\n", (long long)p->underflow);
p->underflow = 0;
if (write_samples) {
p->playing = true;
p->still_playing = true;
p->draining = false;
if (!ao->driver->write && !p->streaming) {
p->streaming = true;
ao->driver->start(ao);
while (p->playing && !p->paused && pos < samples) {
if (!p->pending || !mp_aframe_get_size(p->pending)) {
TA_FREEP(&p->pending);
struct mp_frame frame = mp_pin_out_read(p->input->pins[0]);
if (!frame.type)
break; // we can't/don't want to block
if (frame.type != MP_FRAME_AUDIO) {
if (frame.type == MP_FRAME_EOF)
*eof = true;
mp_frame_unref(&frame);
continue;
}
p->pending = frame.data;
}
if (!data)
break;
int copy = mp_aframe_get_size(p->pending);
uint8_t **fdata = mp_aframe_get_data_ro(p->pending);
copy = MPMIN(copy, samples - pos);
for (int n = 0; n < ao->num_planes; n++) {
memcpy((char *)data[n] + pos * ao->sstride,
fdata[n], copy * ao->sstride);
}
mp_aframe_skip_samples(p->pending, copy);
pos += copy;
*eof = false;
}
pthread_mutex_unlock(&p->lock);
if (write_samples)
ao_wakeup_playthread(ao);
if (!data) {
if (!p->pending)
return 0;
void **pd = (void *)mp_aframe_get_data_rw(p->pending);
if (pd)
ao_post_process_data(ao, pd, mp_aframe_get_size(p->pending));
return 1;
}
return write_samples;
// pad with silence (underflow/paused/eof)
for (int n = 0; n < ao->num_planes; n++) {
af_fill_silence((char *)data[n] + pos, (samples - pos) * ao->sstride,
ao->format);
}
ao_post_process_data(ao, data, pos);
return pos;
}
// Read the given amount of samples in the user-provided data buffer. Returns
@ -202,47 +181,24 @@ int ao_play(struct ao *ao, void **data, int samples, int flags)
int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_us)
{
struct buffer_state *p = ao->buffer_state;
int full_bytes = samples * ao->sstride;
bool need_wakeup = false;
int bytes = 0;
assert(!ao->driver->write);
pthread_mutex_lock(&p->lock);
if (!p->playing || p->paused)
goto end;
int pos = read_buffer(ao, data, samples, &(bool){0});
int buffered_bytes = mp_ring_buffered(p->buffers[0]);
bytes = MPMIN(buffered_bytes, full_bytes);
if (full_bytes > bytes && !p->final_chunk) {
p->underflow += (full_bytes - bytes) / ao->sstride;
ao_add_events(ao, AO_EVENT_UNDERRUN);
}
if (bytes > 0)
if (pos > 0)
p->end_time_us = out_time_us;
for (int n = 0; n < ao->num_planes; n++)
mp_ring_read(p->buffers[n], data[n], bytes);
// Half of the buffer played -> request more.
if (!ao->driver->write)
need_wakeup = buffered_bytes - bytes <= mp_ring_size(p->buffers[0]) / 2;
end:
if (pos < samples && p->playing && !p->paused) {
p->playing = false;
// For ao_drain().
pthread_cond_broadcast(&p->wakeup);
}
pthread_mutex_unlock(&p->lock);
if (need_wakeup)
ao->wakeup_cb(ao->wakeup_ctx);
// pad with silence (underflow/paused/eof)
for (int n = 0; n < ao->num_planes; n++)
af_fill_silence((char *)data[n] + bytes, full_bytes - bytes, ao->format);
ao_post_process_data(ao, data, samples);
return bytes / ao->sstride;
return pos;
}
// Same as ao_read_data(), but convert data according to *fmt.
@ -300,11 +256,13 @@ int ao_control(struct ao *ao, enum aocontrol cmd, void *arg)
return r;
}
static double unlocked_get_delay(struct ao *ao)
double ao_get_delay(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
double driver_delay = 0;
pthread_mutex_lock(&p->lock);
double driver_delay;
if (ao->driver->write) {
struct mp_pcm_state state;
get_dev_state(ao, &state);
@ -312,22 +270,18 @@ static double unlocked_get_delay(struct ao *ao)
} else {
int64_t end = p->end_time_us;
int64_t now = mp_time_us();
driver_delay += MPMAX(0, (end - now) / (1000.0 * 1000.0));
driver_delay = MPMAX(0, (end - now) / (1000.0 * 1000.0));
}
return mp_ring_buffered(p->buffers[0]) / (double)ao->bps + driver_delay;
}
int pending = mp_async_queue_get_samples(p->queue);
if (p->pending)
pending += mp_aframe_get_size(p->pending);
double ao_get_delay(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
double delay = unlocked_get_delay(ao);
pthread_mutex_unlock(&p->lock);
return delay;
return driver_delay + pending / (double)ao->samplerate;
}
// Fully stop playback; clear buffers, including queue.
void ao_reset(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
@ -336,8 +290,10 @@ void ao_reset(struct ao *ao)
pthread_mutex_lock(&p->lock);
for (int n = 0; n < ao->num_planes; n++)
mp_ring_reset(p->buffers[n]);
TA_FREEP(&p->pending);
mp_async_queue_reset(p->queue);
mp_filter_reset(p->filter_root);
mp_async_queue_resume_reading(p->queue);
if (!ao->stream_silence && ao->driver->reset) {
if (ao->driver->write) {
@ -349,17 +305,12 @@ void ao_reset(struct ao *ao)
}
p->streaming = false;
}
p->paused = false;
wakeup = p->playing;
p->playing = false;
p->recover_pause = false;
p->hw_paused = false;
wakeup = p->still_playing || p->draining;
p->draining = false;
p->still_playing = false;
p->end_time_us = 0;
atomic_fetch_and(&ao->events_, ~(unsigned int)AO_EVENT_UNDERRUN);
pthread_mutex_unlock(&p->lock);
if (do_reset)
@ -369,7 +320,29 @@ void ao_reset(struct ao *ao)
ao_wakeup_playthread(ao);
}
void ao_pause(struct ao *ao)
// Initiate playback. This moves from the stop/underrun state to actually
// playing (orthogonally taking the paused state into account). Plays all
// data in the queue, and goes into underrun state if no more data available.
// No-op if already running.
void ao_start(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
p->playing = true;
if (!ao->driver->write && !p->streaming) {
p->streaming = true;
ao->driver->start(ao);
}
pthread_mutex_unlock(&p->lock);
ao_wakeup_playthread(ao);
}
void ao_set_paused(struct ao *ao, bool paused)
{
struct buffer_state *p = ao->buffer_state;
bool wakeup = false;
@ -377,7 +350,7 @@ void ao_pause(struct ao *ao)
pthread_mutex_lock(&p->lock);
if (p->playing && !p->paused) {
if (p->playing && !p->paused && paused) {
if (p->streaming && !ao->stream_silence) {
if (ao->driver->write) {
if (!p->recover_pause)
@ -387,6 +360,7 @@ void ao_pause(struct ao *ao)
} else {
ao->driver->reset(ao);
p->streaming = false;
p->recover_pause = !ao->untimed;
}
} else if (ao->driver->reset) {
// See ao_reset() why this is done outside of the lock.
@ -394,9 +368,20 @@ void ao_pause(struct ao *ao)
p->streaming = false;
}
}
p->paused = true;
wakeup = true;
} else if (p->playing && p->paused && !paused) {
if (ao->driver->write) {
if (p->hw_paused)
ao->driver->set_pause(ao, false);
p->hw_paused = false;
} else {
if (!p->streaming)
ao->driver->start(ao);
p->streaming = true;
}
wakeup = true;
}
p->paused = paused;
pthread_mutex_unlock(&p->lock);
@ -407,52 +392,19 @@ void ao_pause(struct ao *ao)
ao_wakeup_playthread(ao);
}
void ao_resume(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
bool wakeup = false;
pthread_mutex_lock(&p->lock);
if (p->playing && p->paused) {
if (ao->driver->write) {
if (p->streaming && p->hw_paused) {
ao->driver->set_pause(ao, false);
} else {
p->recover_pause = true;
}
p->hw_paused = false;
} else {
if (!p->streaming)
ao->driver->start(ao);
p->streaming = true;
}
p->paused = false;
wakeup = true;
}
pthread_mutex_unlock(&p->lock);
if (wakeup)
ao_wakeup_playthread(ao);
}
bool ao_eof_reached(struct ao *ao)
// Whether audio is playing. This means that there is still data in the buffers,
// and ao_start() was called. This returns true even if playback was logically
// paused. On false, EOF was reached, or an underrun happened, or ao_reset()
// was called.
bool ao_is_playing(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
bool eof = !p->playing;
if (ao->driver->write) {
eof |= !p->still_playing;
} else {
// For simplicity, ignore the latency. Otherwise, we would have to run
// an extra thread to time it.
eof |= mp_ring_buffered(p->buffers[0]) == 0;
}
bool playing = p->playing;
pthread_mutex_unlock(&p->lock);
return eof;
return playing;
}
// Block until the current audio buffer has played completely.
@ -461,35 +413,25 @@ void ao_drain(struct ao *ao)
struct buffer_state *p = ao->buffer_state;
pthread_mutex_lock(&p->lock);
p->final_chunk = true;
while (!p->paused && p->still_playing && p->streaming) {
if (ao->driver->write) {
if (p->draining) {
// Wait for EOF signal from AO.
pthread_cond_wait(&p->wakeup, &p->lock);
} else {
p->draining = true;
MP_VERBOSE(ao, "waiting for draining...\n");
pthread_mutex_unlock(&p->lock);
ao_wakeup_playthread(ao);
pthread_mutex_lock(&p->lock);
}
} else {
double left = mp_ring_buffered(p->buffers[0]) / (double)ao->bps * 1e6;
while (!p->paused && p->playing) {
pthread_mutex_unlock(&p->lock);
double delay = ao_get_delay(ao);
pthread_mutex_lock(&p->lock);
// Limit to buffer + arbitrary ~250ms max. waiting for robustness.
delay += mp_async_queue_get_samples(p->queue) / (double)ao->samplerate;
struct timespec ts = mp_rel_time_to_timespec(MPMAX(delay, 0) + 0.25);
// Wait for EOF signal from AO.
if (pthread_cond_timedwait(&p->wakeup, &p->lock, &ts)) {
MP_VERBOSE(ao, "drain timeout\n");
break;
}
if (!p->playing && mp_async_queue_get_samples(p->queue)) {
MP_WARN(ao, "underrun during draining\n");
pthread_mutex_unlock(&p->lock);
if (left > 0) {
// Wait for lower bound.
mp_sleep_us(left);
// And then poll for actual end. No other way.
// Limit to arbitrary ~250ms max. waiting for robustness.
int64_t max = mp_time_us() + 250000;
while (mp_time_us() < max && !ao_eof_reached(ao))
mp_sleep_us(1);
} else {
p->still_playing = false;
}
ao_start(ao);
pthread_mutex_lock(&p->lock);
}
}
@ -498,6 +440,12 @@ void ao_drain(struct ao *ao)
ao_reset(ao);
}
static void wakeup_filters(void *ctx)
{
struct ao *ao = ctx;
ao_wakeup_playthread(ao);
}
void ao_uninit(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
@ -515,6 +463,9 @@ void ao_uninit(struct ao *ao)
if (ao->driver_initialized)
ao->driver->uninit(ao);
talloc_free(p->filter_root);
talloc_free(p->queue);
talloc_free(p->pending);
talloc_free(p->convert_buffer);
talloc_free(p->temp_buf);
@ -542,16 +493,28 @@ bool init_buffer_post(struct ao *ao)
assert(ao->driver->get_state);
}
for (int n = 0; n < ao->num_planes; n++)
p->buffers[n] = mp_ring_new(ao, ao->buffer * ao->sstride);
mpthread_mutex_init_recursive(&p->lock);
pthread_mutex_init(&p->lock, NULL);
pthread_cond_init(&p->wakeup, NULL);
pthread_mutex_init(&p->pt_lock, NULL);
pthread_cond_init(&p->pt_wakeup, NULL);
p->queue = mp_async_queue_create();
p->filter_root = mp_filter_create_root(ao->global);
p->input = mp_async_queue_create_filter(p->filter_root, MP_PIN_OUT, p->queue);
mp_async_queue_resume_reading(p->queue);
struct mp_async_queue_config cfg = {
.sample_unit = AQUEUE_UNIT_SAMPLES,
.max_samples = ao->buffer,
.max_bytes = INT64_MAX,
};
mp_async_queue_set_config(p->queue, cfg);
if (ao->driver->write) {
mp_filter_graph_set_wakeup_cb(p->filter_root, wakeup_filters, ao);
p->thread_valid = true;
if (pthread_create(&p->thread, NULL, playthread, ao)) {
p->thread_valid = false;
@ -590,89 +553,85 @@ static bool realloc_buf(struct ao *ao, int samples)
}
// called locked
static void ao_play_data(struct ao *ao)
static bool ao_play_data(struct ao *ao)
{
struct buffer_state *p = ao->buffer_state;
if (!(p->playing && (!p->paused || ao->stream_silence)))
return false;
struct mp_pcm_state state;
get_dev_state(ao, &state);
if (p->streaming && !state.playing && !ao->untimed) {
if (p->draining) {
MP_VERBOSE(ao, "underrun signaled for audio end\n");
p->still_playing = false;
pthread_cond_broadcast(&p->wakeup);
} else {
ao_add_events(ao, AO_EVENT_UNDERRUN);
if (p->streaming && !state.playing && !ao->untimed)
goto eof;
void **planes = NULL;
int space = state.free_samples;
if (!space)
return false;
assert(space >= 0);
int samples = 0;
bool got_eof = false;
if (ao->driver->write_frames) {
TA_FREEP(&p->pending);
samples = read_buffer(ao, NULL, 1, &got_eof);
planes = (void **)&p->pending;
} else {
if (!realloc_buf(ao, space)) {
MP_ERR(ao, "Failed to allocate buffer.\n");
return false;
}
planes = (void **)mp_aframe_get_data_rw(p->temp_buf);
assert(planes);
if (p->recover_pause) {
samples = MPCLAMP(p->prepause_state.delay * ao->samplerate, 0, space);
p->recover_pause = false;
mp_aframe_set_silence(p->temp_buf, 0, space);
}
p->streaming = false;
if (!samples) {
samples = read_buffer(ao, planes, space, &got_eof);
if (p->paused || (ao->stream_silence && !p->playing))
samples = space; // read_buffer() sets remainder to silent
}
}
// Round free space to period sizes to reduce number of write() calls.
int space = state.free_samples / ao->period_size * ao->period_size;
bool play_silence = p->paused || (ao->stream_silence && !p->still_playing);
space = MPMAX(space, 0);
if (!realloc_buf(ao, space)) {
MP_ERR(ao, "Failed to allocate buffer.\n");
return;
}
void **planes = (void **)mp_aframe_get_data_rw(p->temp_buf);
assert(planes);
int samples = mp_ring_buffered(p->buffers[0]) / ao->sstride;
if (samples > space)
samples = space;
if (play_silence)
samples = space;
if (p->recover_pause) {
samples = MPCLAMP(p->prepause_state.delay * ao->samplerate, 0, space);
p->recover_pause = false;
mp_aframe_set_silence(p->temp_buf, 0, space);
} else {
samples = ao_read_data(ao, planes, samples, 0);
}
if (play_silence)
samples = space; // ao_read_data() sets remainder to silent
bool is_eof = p->final_chunk && samples < space;
bool ok = true;
int written = 0;
if (samples) {
p->draining |= is_eof;
MP_STATS(ao, "start ao fill");
ok = ao->driver->write(ao, planes, samples);
if (!ao->driver->write(ao, planes, samples))
MP_ERR(ao, "Error writing audio to device.\n");
MP_STATS(ao, "end ao fill");
}
if (!ok)
MP_ERR(ao, "Error writing audio to device.\n");
if (samples > 0 && ok) {
written = samples;
if (!p->streaming) {
MP_VERBOSE(ao, "starting AO\n");
ao->driver->start(ao);
p->streaming = true;
state.playing = true;
}
p->still_playing = !play_silence;
}
if (p->draining && p->still_playing && ao->untimed) {
p->still_playing = false;
pthread_cond_broadcast(&p->wakeup);
MP_TRACE(ao, "in=%d space=%d(%d) pl=%d, eof=%d\n",
samples, space, state.free_samples, p->playing, got_eof);
if (got_eof)
goto eof;
return samples > 0;
eof:
MP_VERBOSE(ao, "audio end or underrun\n");
// Normal AOs signal EOF on underrun, untimed AOs never signal underruns.
if (ao->untimed || !state.playing) {
p->streaming = false;
p->playing = false;
}
// Wait until space becomes available. Also wait if we actually wrote data,
// so the AO wakes us up properly if it needs more data.
p->ao_wait_low_buffer = space == 0 || written > 0 || p->draining;
// Request more data if we're below some random buffer level.
int needed = unlocked_get_space(ao);
bool more = needed >= ao->device_buffer / 4 && !p->final_chunk;
if (more)
ao->wakeup_cb(ao->wakeup_ctx); // request more data
MP_TRACE(ao, "in=%d eof=%d space=%d r=%d wa/pl/dr=%d/%d/%d needed=%d more=%d\n",
samples, is_eof, space, written, p->ao_wait_low_buffer,
p->still_playing, p->draining, needed, more);
ao->wakeup_cb(ao->wakeup_ctx);
// For ao_drain().
pthread_cond_broadcast(&p->wakeup);
return true;
}
static void *playthread(void *arg)
@ -683,20 +642,18 @@ static void *playthread(void *arg)
while (1) {
pthread_mutex_lock(&p->lock);
bool blocked = ao->driver->initially_blocked && !p->initial_unblocked;
bool playing = !p->paused && (p->playing || ao->stream_silence);
if (playing && !blocked)
ao_play_data(ao);
bool progress = false;
if (!ao->driver->initially_blocked || p->initial_unblocked)
progress = ao_play_data(ao);
// Wait until the device wants us to write more data to it.
// Fallback to guessing.
double timeout = INFINITY;
if (p->ao_wait_low_buffer) {
if (p->streaming && !p->paused && !progress) {
// Wake up again if half of the audio buffer has been played.
// Since audio could play at a faster or slower pace, wake up twice
// as often as ideally needed.
timeout = ao->device_buffer / (double)ao->samplerate * 0.25;
p->ao_wait_low_buffer = false;
}
pthread_mutex_unlock(&p->lock);
@ -706,7 +663,7 @@ static void *playthread(void *arg)
pthread_mutex_unlock(&p->pt_lock);
break;
}
if (!p->need_wakeup) {
if (!p->need_wakeup && !progress) {
MP_STATS(ao, "start audio wait");
struct timespec ts = mp_rel_time_to_timespec(timeout);
pthread_cond_timedwait(&p->pt_wakeup, &p->pt_lock, &ts);

View File

@ -139,6 +139,9 @@ struct ao_driver {
// first write() call is done. Encode mode uses this, and push mode
// respects it automatically (don't use with pull mode).
bool initially_blocked;
// If true, write units of entire frames. The write() call is modified to
// use data==mp_aframe. Useful for encoding AO only.
bool write_frames;
// Init the device using ao->format/ao->channels/ao->samplerate. If the
// device doesn't accept these parameters, you can attempt to negotiate
// fallback parameters, and set the ao format fields accordingly.

View File

@ -60,7 +60,6 @@ void encode_lavc_stream_eof(struct encode_lavc_context *ctx,
enum stream_type type);
void encode_lavc_set_metadata(struct encode_lavc_context *ctx,
struct mp_tags *metadata);
void encode_lavc_set_audio_pts(struct encode_lavc_context *ctx, double pts);
bool encode_lavc_didfail(struct encode_lavc_context *ctx); // check if encoding failed
#endif

View File

@ -243,16 +243,6 @@ bool encode_lavc_free(struct encode_lavc_context *ctx)
return res;
}
void encode_lavc_set_audio_pts(struct encode_lavc_context *ctx, double pts)
{
if (ctx) {
pthread_mutex_lock(&ctx->lock);
ctx->last_audio_in_pts = pts;
ctx->samples_since_last_pts = 0;
pthread_mutex_unlock(&ctx->lock);
}
}
// called locked
static void maybe_init_muxer(struct encode_lavc_context *ctx)
{
@ -503,10 +493,7 @@ void encode_lavc_discontinuity(struct encode_lavc_context *ctx)
return;
pthread_mutex_lock(&ctx->lock);
ctx->audio_pts_offset = MP_NOPTS_VALUE;
ctx->discontinuity_pts_offset = MP_NOPTS_VALUE;
pthread_mutex_unlock(&ctx->lock);
}

View File

@ -49,12 +49,6 @@ struct encode_lavc_context {
// must lock manually before accessing state.
pthread_mutex_t lock;
// sync to audio mode
double audio_pts_offset;
double last_audio_in_pts;
int64_t samples_since_last_pts;
// anti discontinuity mode
double next_in_pts;
double discontinuity_pts_offset;

View File

@ -31,11 +31,12 @@
#include "common/common.h"
#include "osdep/timer.h"
#include "audio/audio_buffer.h"
#include "audio/format.h"
#include "audio/out/ao.h"
#include "demux/demux.h"
#include "filters/f_async_queue.h"
#include "filters/f_decoder_wrapper.h"
#include "filters/filter_internal.h"
#include "core.h"
#include "command.h"
@ -46,6 +47,8 @@ enum {
AD_WAIT = -4,
};
static void ao_process(struct mp_filter *f);
static void update_speed_filters(struct MPContext *mpctx)
{
struct ao_chain *ao_c = mpctx->ao_chain;
@ -183,11 +186,12 @@ void update_playback_speed(struct MPContext *mpctx)
static void ao_chain_reset_state(struct ao_chain *ao_c)
{
ao_c->last_out_pts = MP_NOPTS_VALUE;
TA_FREEP(&ao_c->output_frame);
ao_c->out_eof = false;
ao_c->underrun = false;
mp_audio_buffer_clear(ao_c->ao_buffer);
ao_c->start_pts_known = false;
ao_c->start_pts = MP_NOPTS_VALUE;
ao_c->untimed_throttle = false;
ao_c->underrun = false;
}
void reset_audio_state(struct MPContext *mpctx)
@ -204,10 +208,18 @@ void reset_audio_state(struct MPContext *mpctx)
void uninit_audio_out(struct MPContext *mpctx)
{
struct ao_chain *ao_c = mpctx->ao_chain;
if (ao_c) {
ao_c->ao_queue = NULL;
TA_FREEP(&ao_c->queue_filter);
ao_c->ao = NULL;
}
if (mpctx->ao) {
// Note: with gapless_audio, stop_play is not correctly set
if (mpctx->opts->gapless_audio || mpctx->stop_play == AT_END_OF_FILE)
if (mpctx->opts->gapless_audio || mpctx->stop_play == AT_END_OF_FILE) {
MP_VERBOSE(mpctx, "draining left over audio\n");
ao_drain(mpctx->ao);
}
ao_uninit(mpctx->ao);
mp_notify(mpctx, MPV_EVENT_AUDIO_RECONFIG, NULL);
@ -232,8 +244,7 @@ static void ao_chain_uninit(struct ao_chain *ao_c)
mp_pin_disconnect(ao_c->filter_src);
talloc_free(ao_c->filter->f);
talloc_free(ao_c->output_frame);
talloc_free(ao_c->ao_buffer);
talloc_free(ao_c->ao_filter);
talloc_free(ao_c);
}
@ -289,6 +300,25 @@ done:
return res;
}
static void ao_chain_set_ao(struct ao_chain *ao_c, struct ao *ao)
{
if (ao_c->ao != ao) {
assert(!ao_c->ao);
ao_c->ao = ao;
ao_c->ao_queue = ao_get_queue(ao_c->ao);
ao_c->queue_filter = mp_async_queue_create_filter(ao_c->ao_filter,
MP_PIN_IN, ao_c->ao_queue);
mp_async_queue_set_notifier(ao_c->queue_filter, ao_c->ao_filter);
// Make sure filtering never stops with frames stuck in access filter.
mp_filter_set_high_priority(ao_c->queue_filter, true);
}
if (ao_c->filter->ao_needs_update)
mp_output_chain_set_ao(ao_c->filter, ao_c->ao);
mp_filter_wakeup(ao_c->ao_filter);
}
static void reinit_audio_filters_and_output(struct MPContext *mpctx)
{
struct MPOpts *opts = mpctx->opts;
@ -296,10 +326,7 @@ static void reinit_audio_filters_and_output(struct MPContext *mpctx)
assert(ao_c);
struct track *track = ao_c->track;
if (!ao_c->filter->ao_needs_update)
return;
TA_FREEP(&ao_c->output_frame); // stale?
assert(ao_c->filter->ao_needs_update);
// The "ideal" filter output format
struct mp_aframe *out_fmt = mp_aframe_new_ref(ao_c->filter->output_aformat);
@ -327,7 +354,26 @@ static void reinit_audio_filters_and_output(struct MPContext *mpctx)
keep_weak_gapless_format(mpctx->ao_filter_fmt, out_fmt)) ||
(mpctx->ao && opts->gapless_audio > 0))
{
mp_output_chain_set_ao(ao_c->filter, mpctx->ao);
ao_chain_set_ao(ao_c, mpctx->ao);
talloc_free(out_fmt);
return;
}
// Wait until all played.
if (mpctx->ao && ao_is_playing(mpctx->ao)) {
talloc_free(out_fmt);
return;
}
// Format change during syncing. Force playback start early, then wait.
if (ao_c->ao_queue && mp_async_queue_get_frames(ao_c->ao_queue) &&
mpctx->audio_status == STATUS_SYNCING)
{
mpctx->audio_status = STATUS_READY;
mp_wakeup_core(mpctx);
talloc_free(out_fmt);
return;
}
if (mpctx->audio_status == STATUS_READY) {
talloc_free(out_fmt);
return;
}
@ -367,7 +413,6 @@ static void reinit_audio_filters_and_output(struct MPContext *mpctx)
mpctx->ao = ao_init_best(mpctx->global, ao_flags, mp_wakeup_core_cb,
mpctx, mpctx->encode_lavc_ctx, out_rate,
out_format, out_channels);
ao_c->ao = mpctx->ao;
int ao_rate = 0;
int ao_format = 0;
@ -383,7 +428,6 @@ static void reinit_audio_filters_and_output(struct MPContext *mpctx)
MP_ERR(mpctx, "Passthrough format unsupported.\n");
ao_uninit(mpctx->ao);
mpctx->ao = NULL;
ao_c->ao = NULL;
}
}
@ -407,9 +451,6 @@ static void reinit_audio_filters_and_output(struct MPContext *mpctx)
goto init_error;
}
mp_audio_buffer_reinit_fmt(ao_c->ao_buffer, ao_format, &ao_channels,
ao_rate);
char tmp[192];
MP_INFO(mpctx, "AO: [%s] %s\n", ao_get_name(mpctx->ao),
audio_config_to_str_buf(tmp, sizeof(tmp), ao_rate, ao_format,
@ -420,10 +461,17 @@ static void reinit_audio_filters_and_output(struct MPContext *mpctx)
ao_c->ao_resume_time =
opts->audio_wait_open > 0 ? mp_time_sec() + opts->audio_wait_open : 0;
mp_output_chain_set_ao(ao_c->filter, mpctx->ao);
ao_set_paused(mpctx->ao, get_internal_paused(mpctx));
ao_chain_set_ao(ao_c, mpctx->ao);
audio_update_volume(mpctx);
// Almost nonsensical hack to deal with certain format change scenarios.
if (mpctx->audio_status == STATUS_PLAYING)
ao_start(mpctx->ao);
mp_wakeup_core(mpctx);
mp_notify(mpctx, MPV_EVENT_AUDIO_RECONFIG, NULL);
return;
@ -472,6 +520,11 @@ void reinit_audio_chain(struct MPContext *mpctx)
reinit_audio_chain_src(mpctx, track);
}
static const struct mp_filter_info ao_filter = {
.name = "ao",
.process = ao_process,
};
// (track=NULL creates a blank chain, used for lavfi-complex)
void reinit_audio_chain_src(struct MPContext *mpctx, struct track *track)
{
@ -481,15 +534,22 @@ void reinit_audio_chain_src(struct MPContext *mpctx, struct track *track)
struct ao_chain *ao_c = talloc_zero(NULL, struct ao_chain);
mpctx->ao_chain = ao_c;
ao_c->mpctx = mpctx;
ao_c->log = mpctx->log;
ao_c->filter =
mp_output_chain_create(mpctx->filter_root, MP_OUTPUT_CHAIN_AUDIO);
ao_c->spdif_passthrough = true;
ao_c->last_out_pts = MP_NOPTS_VALUE;
ao_c->ao_buffer = mp_audio_buffer_create(NULL);
ao_c->ao = mpctx->ao;
ao_c->delay = mpctx->opts->audio_delay;
ao_c->ao_filter = mp_filter_create(mpctx->filter_root, &ao_filter);
if (!ao_c->filter || !ao_c->ao_filter)
goto init_error;
ao_c->ao_filter->priv = ao_c;
mp_filter_add_pin(ao_c->ao_filter, MP_PIN_IN, "in");
mp_pin_connect(ao_c->ao_filter->pins[0], ao_c->filter->f->pins[1]);
if (track) {
ao_c->track = track;
track->ao_c = ao_c;
@ -504,15 +564,8 @@ void reinit_audio_chain_src(struct MPContext *mpctx, struct track *track)
if (recreate_audio_filters(mpctx) < 0)
goto init_error;
if (mpctx->ao) {
int rate;
int format;
struct mp_chmap channels;
ao_get_format(mpctx->ao, &rate, &format, &channels);
mp_audio_buffer_reinit_fmt(ao_c->ao_buffer, format, &channels, rate);
if (mpctx->ao)
audio_update_volume(mpctx);
}
mp_wakeup_core(mpctx);
return;
@ -523,25 +576,11 @@ init_error:
error_on_track(mpctx, track);
}
// Return pts value corresponding to the end point of audio written to the
// ao so far.
// Return pts value corresponding to the start point of audio written to the
// ao queue so far.
double written_audio_pts(struct MPContext *mpctx)
{
struct ao_chain *ao_c = mpctx->ao_chain;
if (!ao_c)
return MP_NOPTS_VALUE;
// end pts of audio that has been output by filters
double a_pts = ao_c->last_out_pts;
if (a_pts == MP_NOPTS_VALUE)
return MP_NOPTS_VALUE;
// Data that was ready for ao but was buffered because ao didn't fully
// accept everything to internal buffers yet. This also does not correctly
// track playback speed, so we use the current speed.
a_pts -= mp_audio_buffer_seconds(ao_c->ao_buffer) * mpctx->audio_speed;
return a_pts;
return mpctx->ao_chain ? mpctx->ao_chain->last_out_pts : MP_NOPTS_VALUE;
}
// Return pts value corresponding to currently playing audio.
@ -553,187 +592,125 @@ double playing_audio_pts(struct MPContext *mpctx)
return pts - mpctx->audio_speed * ao_get_delay(mpctx->ao);
}
static int write_to_ao(struct MPContext *mpctx, uint8_t **planes, int samples,
int flags)
{
if (mpctx->paused)
return 0;
struct ao *ao = mpctx->ao;
int samplerate;
int format;
struct mp_chmap channels;
ao_get_format(ao, &samplerate, &format, &channels);
encode_lavc_set_audio_pts(mpctx->encode_lavc_ctx, playing_audio_pts(mpctx));
if (samples == 0)
return 0;
double real_samplerate = samplerate / mpctx->audio_speed;
int played = ao_play(mpctx->ao, (void **)planes, samples, flags);
assert(played <= samples);
if (played > 0) {
mpctx->shown_aframes += played;
mpctx->delay += played / real_samplerate;
return played;
}
return 0;
}
// Return the number of samples that must be skipped or prepended to reach the
// target audio pts after a seek (for A/V sync or hr-seek).
// Return value (*skip):
// >0: skip this many samples
// =0: don't do anything
// <0: prepend this many samples of silence
// Returns false if PTS is not known yet.
static bool get_sync_samples(struct MPContext *mpctx, int *skip)
{
struct MPOpts *opts = mpctx->opts;
*skip = 0;
if (mpctx->audio_status != STATUS_SYNCING)
return true;
int ao_rate;
int ao_format;
struct mp_chmap ao_channels;
ao_get_format(mpctx->ao, &ao_rate, &ao_format, &ao_channels);
double play_samplerate = ao_rate / mpctx->audio_speed;
if (!opts->initial_audio_sync) {
mpctx->audio_status = STATUS_FILLING;
return true;
}
double written_pts = written_audio_pts(mpctx);
if (written_pts == MP_NOPTS_VALUE &&
!mp_audio_buffer_samples(mpctx->ao_chain->ao_buffer))
return false; // no audio read yet
bool sync_to_video = mpctx->vo_chain && mpctx->video_status != STATUS_EOF &&
!mpctx->vo_chain->is_sparse;
double sync_pts = MP_NOPTS_VALUE;
if (sync_to_video) {
if (mpctx->video_status < STATUS_READY)
return false; // wait until we know a video PTS
if (mpctx->video_pts != MP_NOPTS_VALUE)
sync_pts = mpctx->video_pts - opts->audio_delay;
} else if (mpctx->hrseek_active) {
sync_pts = mpctx->hrseek_pts;
} else {
// If audio-only is enabled mid-stream during playback, sync accordingly.
sync_pts = mpctx->playback_pts;
}
if (sync_pts == MP_NOPTS_VALUE) {
mpctx->audio_status = STATUS_FILLING;
return true; // syncing disabled
}
double ptsdiff = written_pts - sync_pts;
// Missing timestamp, or PTS reset, or just broken.
if (written_pts == MP_NOPTS_VALUE) {
MP_WARN(mpctx, "Failed audio resync.\n");
mpctx->audio_status = STATUS_FILLING;
return true;
}
ptsdiff = MPCLAMP(ptsdiff, -3600, 3600);
MP_VERBOSE(mpctx, "audio sync: sync_to_video=%d, offset=%f\n",
sync_to_video, ptsdiff);
int align = af_format_sample_alignment(ao_format);
*skip = (int)(-ptsdiff * play_samplerate) / align * align;
return true;
}
static bool copy_output(struct MPContext *mpctx, struct ao_chain *ao_c,
int minsamples, double endpts, bool *seteof)
{
struct mp_audio_buffer *outbuf = ao_c->ao_buffer;
int ao_rate;
int ao_format;
struct mp_chmap ao_channels;
ao_get_format(ao_c->ao, &ao_rate, &ao_format, &ao_channels);
while (mp_audio_buffer_samples(outbuf) < minsamples) {
int cursamples = mp_audio_buffer_samples(outbuf);
int maxsamples = INT_MAX;
if (endpts != MP_NOPTS_VALUE) {
double rate = ao_rate / mpctx->audio_speed;
double curpts = written_audio_pts(mpctx);
if (curpts != MP_NOPTS_VALUE) {
double remaining =
(endpts - curpts - mpctx->opts->audio_delay) * rate;
maxsamples = MPCLAMP(remaining, 0, INT_MAX);
}
}
if (!ao_c->output_frame || !mp_aframe_get_size(ao_c->output_frame)) {
TA_FREEP(&ao_c->output_frame);
struct mp_frame frame = mp_pin_out_read(ao_c->filter->f->pins[1]);
if (frame.type == MP_FRAME_AUDIO) {
ao_c->output_frame = frame.data;
ao_c->out_eof = false;
ao_c->last_out_pts = mp_aframe_end_pts(ao_c->output_frame);
} else if (frame.type == MP_FRAME_EOF) {
ao_c->out_eof = true;
} else if (frame.type) {
MP_ERR(mpctx, "unknown frame type\n");
mp_frame_unref(&frame);
}
}
// out of data
if (!ao_c->output_frame) {
if (ao_c->out_eof) {
*seteof = true;
return true;
}
return false;
}
if (cursamples + mp_aframe_get_size(ao_c->output_frame) > maxsamples) {
if (cursamples < maxsamples) {
uint8_t **data = mp_aframe_get_data_ro(ao_c->output_frame);
mp_audio_buffer_append(outbuf, (void **)data,
maxsamples - cursamples);
mp_aframe_skip_samples(ao_c->output_frame,
maxsamples - cursamples);
}
*seteof = true;
return true;
}
uint8_t **data = mp_aframe_get_data_ro(ao_c->output_frame);
mp_audio_buffer_append(outbuf, (void **)data,
mp_aframe_get_size(ao_c->output_frame));
TA_FREEP(&ao_c->output_frame);
}
return true;
}
/* Try to get at least minsamples decoded+filtered samples in outbuf
* (total length including possible existing data).
* Return 0 on success, or negative AD_* error code.
* In the former case outbuf has at least minsamples buffered on return.
* In case of EOF/error it might or might not be. */
static int filter_audio(struct MPContext *mpctx, struct mp_audio_buffer *outbuf,
int minsamples)
// This garbage is needed for untimed AOs. These consume audio infinitely fast,
// so try keeping approximate A/V sync by blocking audio transfer as needed.
static void update_throttle(struct MPContext *mpctx)
{
struct ao_chain *ao_c = mpctx->ao_chain;
bool new_throttle = mpctx->audio_status == STATUS_PLAYING &&
mpctx->delay > 0 && ao_c && ao_c->ao &&
ao_untimed(ao_c->ao) &&
mpctx->video_status != STATUS_EOF;
if (ao_c && new_throttle != ao_c->untimed_throttle) {
ao_c->untimed_throttle = new_throttle;
mp_wakeup_core(mpctx);
mp_filter_wakeup(ao_c->ao_filter);
}
}
double endpts = get_play_end_pts(mpctx);
if (endpts != MP_NOPTS_VALUE)
endpts *= mpctx->play_dir;
static void ao_process(struct mp_filter *f)
{
struct ao_chain *ao_c = f->priv;
struct MPContext *mpctx = ao_c->mpctx;
bool eof = false;
if (!copy_output(mpctx, ao_c, minsamples, endpts, &eof))
return AD_WAIT;
return eof ? AD_EOF : AD_OK;
if (!ao_c->queue_filter) {
// This will eventually lead to the creation of the AO + queue, due
// to how f_output_chain and AO management works.
mp_pin_out_request_data(f->ppins[0]);
return;
}
// Due to mp_async_queue_set_notifier() this function is called when the
// queue becomes full. This affects state changes in the normal playloop,
// so wake it up. But avoid redundant wakeups during normal playback.
if (mpctx->audio_status != STATUS_PLAYING &&
mp_async_queue_is_full(ao_c->ao_queue))
mp_wakeup_core(mpctx);
if (mpctx->audio_status == STATUS_SYNCING && !ao_c->start_pts_known)
return;
if (ao_c->untimed_throttle)
return;
if (!mp_pin_can_transfer_data(ao_c->queue_filter->pins[0], f->ppins[0]))
return;
struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
if (frame.type == MP_FRAME_AUDIO) {
struct mp_aframe *af = frame.data;
double endpts = get_play_end_pts(mpctx);
if (endpts != MP_NOPTS_VALUE) {
endpts *= mpctx->play_dir;
// Avoid decoding and discarding the entire rest of the file.
if (mp_aframe_get_pts(af) >= endpts) {
mp_pin_out_unread(f->ppins[0], frame);
if (!ao_c->out_eof) {
ao_c->out_eof = true;
mp_pin_in_write(ao_c->queue_filter->pins[0], MP_EOF_FRAME);
}
return;
}
}
double startpts = mpctx->audio_status == STATUS_SYNCING ?
ao_c->start_pts : MP_NOPTS_VALUE;
mp_aframe_clip_timestamps(af, startpts, endpts);
int samples = mp_aframe_get_size(af);
if (!samples) {
mp_filter_internal_mark_progress(f);
mp_frame_unref(&frame);
return;
}
ao_c->out_eof = false;
if (mpctx->audio_status == STATUS_DRAINING ||
mpctx->audio_status == STATUS_EOF)
{
// If a new frame comes after decoder/filter EOF, we should preferably
// call get_sync_pts() again, which (at least in obscure situations)
// may require us to wait a while until the sync PTS is known. Our
// code sucks and can't deal with that, so jump through a hoop to
// get things done in the correct order.
mp_pin_out_unread(f->ppins[0], frame);
ao_c->start_pts_known = false;
mpctx->audio_status = STATUS_SYNCING;
mp_wakeup_core(mpctx);
MP_VERBOSE(mpctx, "new audio frame after EOF\n");
return;
}
mpctx->shown_aframes += samples;
double real_samplerate = mp_aframe_get_rate(af) / mpctx->audio_speed;
mpctx->delay += samples / real_samplerate;
ao_c->last_out_pts = mp_aframe_end_pts(af);
update_throttle(mpctx);
// Gapless case: the AO is still playing from previous file. It makes
// no sense to wait, and in fact the "full queue" event we're waiting
// for may never happen, so start immediately.
// If the new audio starts "later" (big video sync offset), transfer
// of data is stopped somewhere else.
if (mpctx->audio_status == STATUS_SYNCING && ao_is_playing(ao_c->ao)) {
mpctx->audio_status = STATUS_READY;
mp_wakeup_core(mpctx);
MP_VERBOSE(mpctx, "previous audio still playing; continuing\n");
}
mp_pin_in_write(ao_c->queue_filter->pins[0], frame);
} else if (frame.type == MP_FRAME_EOF) {
MP_VERBOSE(mpctx, "audio filter EOF\n");
ao_c->out_eof = true;
mp_wakeup_core(mpctx);
mp_pin_in_write(ao_c->queue_filter->pins[0], frame);
mp_filter_internal_mark_progress(f);
} else {
mp_frame_unref(&frame);
}
}
void reload_audio_output(struct MPContext *mpctx)
@ -771,10 +748,70 @@ void reload_audio_output(struct MPContext *mpctx)
mp_wakeup_core(mpctx);
}
// Returns audio start pts for seeking or video sync.
// Returns false if PTS is not known yet.
static bool get_sync_pts(struct MPContext *mpctx, double *pts)
{
struct MPOpts *opts = mpctx->opts;
*pts = MP_NOPTS_VALUE;
if (!opts->initial_audio_sync)
return true;
bool sync_to_video = mpctx->vo_chain && mpctx->video_status != STATUS_EOF &&
!mpctx->vo_chain->is_sparse;
if (sync_to_video) {
if (mpctx->video_status < STATUS_READY)
return false; // wait until we know a video PTS
if (mpctx->video_pts != MP_NOPTS_VALUE)
*pts = mpctx->video_pts - opts->audio_delay;
} else if (mpctx->hrseek_active) {
*pts = mpctx->hrseek_pts;
} else {
// If audio-only is enabled mid-stream during playback, sync accordingly.
*pts = mpctx->playback_pts;
}
return true;
}
// Look whether audio can be started yet - if audio has to start some time
// after video.
static void check_audio_start(struct MPContext *mpctx, bool force)
{
struct ao_chain *ao_c = mpctx->ao_chain;
if (!ao_c || !ao_c->ao || mpctx->audio_status != STATUS_READY)
return;
if (!mpctx->restart_complete && !force)
return;
double pts = MP_NOPTS_VALUE;
if (!get_sync_pts(mpctx, &pts))
return;
double apts = playing_audio_pts(mpctx); // (basically including mpctx->delay)
if (pts != MP_NOPTS_VALUE && apts != MP_NOPTS_VALUE && pts < apts &&
mpctx->video_status != STATUS_EOF)
{
double diff = (apts - pts) / mpctx->opts->playback_speed;
mp_set_timeout(mpctx, diff);
MP_VERBOSE(mpctx, "delaying audio start %f vs. %f, diff=%f\n",
apts, pts, diff);
return;
}
MP_VERBOSE(mpctx, "starting audio playback\n");
ao_start(ao_c->ao);
mpctx->audio_status = STATUS_PLAYING;
if (ao_c->out_eof)
mpctx->audio_status = STATUS_DRAINING;
ao_c->underrun = false;
mp_wakeup_core(mpctx);
}
void fill_audio_out_buffers(struct MPContext *mpctx)
{
struct MPOpts *opts = mpctx->opts;
bool was_eof = mpctx->audio_status == STATUS_EOF;
if (mpctx->ao && ao_query_and_reset_events(mpctx->ao, AO_EVENT_RELOAD))
reload_audio_output(mpctx);
@ -783,6 +820,8 @@ void fill_audio_out_buffers(struct MPContext *mpctx)
AO_EVENT_INITIAL_UNBLOCK))
ao_unblock(mpctx->ao);
update_throttle(mpctx);
struct ao_chain *ao_c = mpctx->ao_chain;
if (!ao_c)
return;
@ -792,30 +831,8 @@ void fill_audio_out_buffers(struct MPContext *mpctx)
return;
}
// (if AO is set due to gapless from previous file, then we can try to
// filter normally until the filter tells us to change the AO)
if (!mpctx->ao) {
// Probe the initial audio format.
mp_pin_out_request_data(ao_c->filter->f->pins[1]);
if (ao_c->filter->ao_needs_update)
reinit_audio_filters_and_output(mpctx);
if (!mpctx->ao_chain)
return;
if (ao_c->filter->got_output_eof &&
mpctx->audio_status != STATUS_EOF)
{
mpctx->audio_status = STATUS_EOF;
MP_VERBOSE(mpctx, "audio EOF without any data\n");
mp_filter_reset(ao_c->filter->f);
encode_lavc_stream_eof(mpctx->encode_lavc_ctx, STREAM_AUDIO);
}
return; // try again next iteration
}
if (ao_c->ao_resume_time > mp_time_sec()) {
double remaining = ao_c->ao_resume_time - mp_time_sec();
mp_set_timeout(mpctx, remaining);
return;
}
if (mpctx->vo_chain && ao_c->track && ao_c->track->dec &&
mp_decoder_wrapper_get_pts_reset(ao_c->track->dec))
@ -823,157 +840,97 @@ void fill_audio_out_buffers(struct MPContext *mpctx)
MP_WARN(mpctx, "Reset playback due to audio timestamp reset.\n");
reset_playback_state(mpctx);
mp_wakeup_core(mpctx);
return;
}
int ao_rate;
int ao_format;
struct mp_chmap ao_channels;
ao_get_format(mpctx->ao, &ao_rate, &ao_format, &ao_channels);
int align = af_format_sample_alignment(ao_format);
// If audio is infinitely fast, somehow try keeping approximate A/V sync.
if (mpctx->audio_status == STATUS_PLAYING && ao_untimed(mpctx->ao) &&
mpctx->video_status != STATUS_EOF && mpctx->delay > 0)
return;
int playsize = ao_get_space(mpctx->ao);
if (ao_query_and_reset_events(mpctx->ao, AO_EVENT_UNDERRUN)) {
if (!ao_c->underrun)
MP_WARN(mpctx, "Audio device underrun detected.\n");
ao_c->underrun = true;
}
// Stop feeding data if an underrun happened. Something else needs to
// "unblock" audio after underrun. handle_update_cache() does this and can
// take the network state into account.
if (ao_c->underrun)
return;
int skip = 0;
bool sync_known = get_sync_samples(mpctx, &skip);
if (skip > 0) {
playsize = MPMIN(skip + 1, MPMAX(playsize, 2500)); // buffer extra data
} else if (skip < 0) {
playsize = MPMAX(1, playsize + skip); // silence will be prepended
}
playsize = playsize / align * align;
int status = mpctx->audio_status >= STATUS_DRAINING ? AD_EOF : AD_OK;
bool working = false;
if (playsize > mp_audio_buffer_samples(ao_c->ao_buffer)) {
status = filter_audio(mpctx, ao_c->ao_buffer, playsize);
if (ao_c->filter->ao_needs_update) {
reinit_audio_filters_and_output(mpctx);
mp_wakeup_core(mpctx);
return; // retry on next iteration
}
if (status == AD_WAIT)
return;
working = true;
}
// If EOF was reached before, but now something can be decoded, try to
// restart audio properly. This helps with video files where audio starts
// later. Retrying is needed to get the correct sync PTS.
if (mpctx->audio_status >= STATUS_DRAINING &&
mp_audio_buffer_samples(ao_c->ao_buffer) > 0)
{
mpctx->audio_status = STATUS_SYNCING;
return; // retry on next iteration
}
bool end_sync = false;
if (skip >= 0) {
int max = mp_audio_buffer_samples(ao_c->ao_buffer);
mp_audio_buffer_skip(ao_c->ao_buffer, MPMIN(skip, max));
// If something is left, we definitely reached the target time.
end_sync |= sync_known && skip < max;
working |= skip > 0;
} else if (skip < 0) {
if (-skip > playsize) { // heuristic against making the buffer too large
ao_reset(mpctx->ao); // some AOs repeat data on underflow
mpctx->audio_status = STATUS_DRAINING;
mpctx->delay = 0;
return;
}
mp_audio_buffer_prepend_silence(ao_c->ao_buffer, -skip);
end_sync = true;
}
if (mpctx->audio_status == STATUS_SYNCING) {
if (end_sync)
mpctx->audio_status = STATUS_FILLING;
if (status != AD_OK && !mp_audio_buffer_samples(ao_c->ao_buffer))
mpctx->audio_status = STATUS_EOF;
if (working || end_sync)
double pts;
bool ok = get_sync_pts(mpctx, &pts);
// If the AO is still playing from the previous file (due to gapless),
// but if video is active, this may not work if audio starts later than
// video, and gapless has no advantages anyway. So block doing anything
// until the old audio is fully played.
// (Buggy if AO underruns.)
if (mpctx->ao && ao_is_playing(mpctx->ao) &&
mpctx->video_status != STATUS_EOF)
ok = false;
if (ao_c->start_pts_known != ok || ao_c->start_pts != pts) {
ao_c->start_pts_known = ok;
ao_c->start_pts = pts;
mp_filter_wakeup(ao_c->ao_filter);
}
if (ao_c->ao && mp_async_queue_is_full(ao_c->ao_queue)) {
mpctx->audio_status = STATUS_READY;
mp_wakeup_core(mpctx);
return; // continue on next iteration
MP_VERBOSE(mpctx, "audio ready\n");
} else if (ao_c->out_eof) {
// Force playback start early.
mpctx->audio_status = STATUS_READY;
mp_wakeup_core(mpctx);
MP_VERBOSE(mpctx, "audio ready (and EOF)\n");
}
}
assert(mpctx->audio_status >= STATUS_FILLING);
if (ao_c->ao && !ao_is_playing(ao_c->ao) && !ao_c->underrun &&
(mpctx->audio_status == STATUS_PLAYING ||
mpctx->audio_status == STATUS_DRAINING))
{
// Should be playing, but somehow isn't.
// We already have as much data as the audio device wants, and can start
// writing it any time.
if (mpctx->audio_status == STATUS_FILLING)
mpctx->audio_status = STATUS_READY;
// Even if we're done decoding and syncing, let video start first - this is
// required, because sending audio to the AO already starts playback.
if (mpctx->audio_status == STATUS_READY) {
// Warning: relies on handle_playback_restart() being called afterwards.
return;
}
bool audio_eof = status == AD_EOF;
bool partial_fill = false;
int playflags = 0;
if (playsize > mp_audio_buffer_samples(ao_c->ao_buffer)) {
playsize = mp_audio_buffer_samples(ao_c->ao_buffer);
partial_fill = true;
}
audio_eof &= partial_fill;
if (audio_eof && playsize < align)
playsize = 0;
// With gapless audio, delay this to ao_uninit. There must be only
// 1 final chunk, and that is handled when calling ao_uninit().
// If video is still on-going, trying to do gapless is pointless, as video
// will have to continue for a while with audio stopped (but still try to
// do it if gapless is forced, mostly for testing).
if (audio_eof && (!opts->gapless_audio ||
(opts->gapless_audio <= 0 && mpctx->video_status != STATUS_EOF)))
playflags |= PLAYER_FINAL_CHUNK;
uint8_t **planes;
int samples;
mp_audio_buffer_peek(ao_c->ao_buffer, &planes, &samples);
if (audio_eof || samples >= align)
samples = samples / align * align;
samples = MPMIN(samples, mpctx->paused ? 0 : playsize);
int played = write_to_ao(mpctx, planes, samples, playflags);
assert(played >= 0 && played <= samples);
mp_audio_buffer_skip(ao_c->ao_buffer, played);
mpctx->audio_status = STATUS_PLAYING;
if (audio_eof && !playsize) {
mpctx->audio_status = STATUS_DRAINING;
// Wait until the AO has played all queued data. In the gapless case,
// we trigger EOF immediately, and let it play asynchronously.
if (ao_eof_reached(mpctx->ao) || opts->gapless_audio) {
if (ao_c->out_eof && !mp_async_queue_get_frames(ao_c->ao_queue)) {
MP_VERBOSE(mpctx, "AO signaled EOF (while in state %s)\n",
mp_status_str(mpctx->audio_status));
mpctx->audio_status = STATUS_EOF;
if (!was_eof) {
MP_VERBOSE(mpctx, "audio EOF reached\n");
mp_wakeup_core(mpctx);
// stops untimed AOs, stops pull AOs from streaming silence
ao_reset(ao_c->ao);
} else {
if (!ao_c->ao_underrun) {
MP_WARN(mpctx, "Audio device underrun detected.\n");
ao_c->ao_underrun = true;
mp_wakeup_core(mpctx);
ao_c->underrun = true;
}
// Wait until buffers are filled before recovering underrun.
if (ao_c->out_eof || mp_async_queue_is_full(ao_c->ao_queue)) {
MP_VERBOSE(mpctx, "restarting audio after underrun\n");
ao_start(mpctx->ao_chain->ao);
ao_c->ao_underrun = false;
ao_c->underrun = false;
mp_wakeup_core(mpctx);
encode_lavc_stream_eof(mpctx->encode_lavc_ctx, STREAM_AUDIO);
}
}
}
if (mpctx->audio_status == STATUS_PLAYING && ao_c->out_eof) {
mpctx->audio_status = STATUS_DRAINING;
MP_VERBOSE(mpctx, "audio draining\n");
mp_wakeup_core(mpctx);
}
if (ao_c->ao && mpctx->audio_status == STATUS_DRAINING) {
// Wait until the AO has played all queued data. In the gapless case,
// we trigger EOF immediately, and let it play asynchronously.
if (!ao_is_playing(ao_c->ao) ||
(opts->gapless_audio && !ao_untimed(ao_c->ao)))
{
MP_VERBOSE(mpctx, "audio EOF reached\n");
mpctx->audio_status = STATUS_EOF;
mp_wakeup_core(mpctx);
encode_lavc_stream_eof(mpctx->encode_lavc_ctx, STREAM_AUDIO);
}
}
check_audio_start(mpctx, false);
}
void audio_start_ao(struct MPContext *mpctx)
{
assert(mpctx->audio_status == STATUS_READY);
check_audio_start(mpctx, true);
}
// Drop data queued for output, or which the AO is currently outputting.

View File

@ -190,28 +190,33 @@ struct vo_chain {
// Like vo_chain, for audio.
struct ao_chain {
struct mp_log *log;
struct MPContext *mpctx;
bool spdif_passthrough, spdif_failed;
struct mp_output_chain *filter;
struct ao *ao;
struct mp_audio_buffer *ao_buffer;
struct mp_async_queue *ao_queue;
struct mp_filter *queue_filter;
struct mp_filter *ao_filter;
double ao_resume_time;
// 1-element output frame queue.
struct mp_aframe *output_frame;
bool out_eof;
double last_out_pts;
double start_pts;
bool start_pts_known;
struct track *track;
struct mp_pin *filter_src;
struct mp_pin *dec_src;
double delay;
bool untimed_throttle;
bool underrun;
bool ao_underrun; // last known AO state
bool underrun; // for cache pause logic
};
/* Note that playback can be paused, stopped, etc. at any time. While paused,
@ -223,7 +228,6 @@ struct ao_chain {
enum playback_status {
// code may compare status values numerically
STATUS_SYNCING, // seeking for a position to resume
STATUS_FILLING, // decoding more data (so you start with full buffers)
STATUS_READY, // buffers full, playback can be started any time
STATUS_PLAYING, // normal playback
STATUS_DRAINING, // decoding has ended; still playing out queued buffers
@ -500,6 +504,7 @@ void reinit_audio_chain_src(struct MPContext *mpctx, struct track *track);
void audio_update_volume(struct MPContext *mpctx);
void audio_update_balance(struct MPContext *mpctx);
void reload_audio_output(struct MPContext *mpctx);
void audio_start_ao(struct MPContext *mpctx);
// configfiles.c
void mp_parse_cfgfiles(struct MPContext *mpctx);

View File

@ -312,7 +312,6 @@ const char *mp_status_str(enum playback_status st)
{
switch (st) {
case STATUS_SYNCING: return "syncing";
case STATUS_FILLING: return "filling";
case STATUS_READY: return "ready";
case STATUS_PLAYING: return "playing";
case STATUS_DRAINING: return "draining";

View File

@ -31,6 +31,7 @@
#include "common/recorder.h"
#include "common/stats.h"
#include "filters/f_decoder_wrapper.h"
#include "filters/filter_internal.h"
#include "options/m_config_frontend.h"
#include "options/m_property.h"
#include "common/playlist.h"
@ -161,13 +162,8 @@ void set_pause_state(struct MPContext *mpctx, bool user_pause)
if (internal_paused != mpctx->paused) {
mpctx->paused = internal_paused;
if (mpctx->ao && mpctx->ao_chain) {
if (internal_paused) {
ao_pause(mpctx->ao);
} else {
ao_resume(mpctx->ao);
}
}
if (mpctx->ao && mpctx->ao_chain)
ao_set_paused(mpctx->ao, internal_paused);
if (mpctx->video_out)
vo_set_paused(mpctx->video_out, internal_paused);
@ -956,8 +952,8 @@ static void handle_keep_open(struct MPContext *mpctx)
seek_to_last_frame(mpctx);
}
if (opts->keep_open_pause) {
if (mpctx->ao)
ao_drain(mpctx->ao);
if (mpctx->ao && ao_is_playing(mpctx->ao))
return;
set_pause_state(mpctx, true);
}
}
@ -1122,10 +1118,7 @@ static void handle_playback_restart(struct MPContext *mpctx)
return;
}
MP_DBG(mpctx, "starting audio playback\n");
mpctx->audio_status = STATUS_PLAYING;
fill_audio_out_buffers(mpctx); // actually play prepared buffer
mp_wakeup_core(mpctx);
audio_start_ao(mpctx);
}
if (!mpctx->restart_complete) {

View File

@ -278,8 +278,11 @@ void reinit_video_chain_src(struct MPContext *mpctx, struct track *track)
vo_set_paused(vo_c->vo, get_internal_paused(mpctx));
// If we switch on video again, ensure audio position matches up.
if (mpctx->ao_chain)
if (mpctx->ao_chain && mpctx->ao_chain->ao) {
ao_reset(mpctx->ao_chain->ao);
mpctx->ao_chain->start_pts_known = false;
mpctx->audio_status = STATUS_SYNCING;
}
reset_video_state(mpctx);
reset_subtitle_state(mpctx);