Add new Media::Audio::Instance for audio tracks.

Move some audio-related code from Media::Player to Media::Audio.
This commit is contained in:
John Preston 2017-05-03 14:36:39 +03:00
parent 11525a1e50
commit 6f89d01452
22 changed files with 909 additions and 355 deletions

View File

@ -2434,7 +2434,7 @@ namespace {
void playSound() {
if (Global::SoundNotify() && !Platform::Notifications::SkipAudio()) {
Media::Player::PlayNotify();
Media::Audio::PlayNotify();
}
}

View File

@ -319,10 +319,7 @@ void Application::closeApplication() {
if (App::launchState() == App::QuitProcessed) return;
App::setLaunchState(App::QuitProcessed);
if (_messengerInstance) {
Messenger::Instance().prepareToDestroy();
_messengerInstance.reset();
}
_messengerInstance.reset();
Sandbox::finish();

View File

@ -23,12 +23,12 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
#include "media/media_audio_ffmpeg_loader.h"
#include "media/media_child_ffmpeg_loader.h"
#include "media/media_audio_loaders.h"
#include "media/media_audio_track.h"
#include "platform/platform_audio.h"
#include "base/task_queue.h"
#include <AL/al.h>
#include <AL/alc.h>
#define AL_ALEXT_PROTOTYPES
#include <AL/alext.h>
#include <numeric>
@ -36,27 +36,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
Q_DECLARE_METATYPE(AudioMsgId);
Q_DECLARE_METATYPE(VoiceWaveform);
extern "C" {
#ifdef Q_OS_MAC
#include <iconv.h>
#undef iconv_open
#undef iconv
#undef iconv_close
iconv_t iconv_open(const char* tocode, const char* fromcode) {
return libiconv_open(tocode, fromcode);
}
size_t iconv(iconv_t cd, char** inbuf, size_t *inbytesleft, char** outbuf, size_t *outbytesleft) {
return libiconv(cd, inbuf, inbytesleft, outbuf, outbytesleft);
}
int iconv_close(iconv_t cd) {
return libiconv_close(cd);
}
#endif // Q_OS_MAC
} // extern "C"
namespace {
QMutex AudioMutex;
@ -69,16 +48,111 @@ auto suppressSongGain = 1.;
} // namespace
namespace Media {
namespace Player {
namespace Audio {
namespace {
constexpr auto kVideoVolumeRound = 10000;
constexpr auto kPreloadSamples = 2LL * 48000; // preload next part if less than 2 seconds remains
constexpr auto kFadeDuration = TimeMs(500);
constexpr auto kCheckPlaybackPositionTimeout = TimeMs(100); // 100ms per check audio position
constexpr auto kCheckPlaybackPositionDelta = 2400LL; // update position called each 2400 samples
constexpr auto kCheckFadingTimeout = TimeMs(7); // 7ms
constexpr auto kDetachDeviceTimeout = TimeMs(500); // destroy the audio device after 500ms of silence
Player::Mixer *MixerInstance = nullptr;
// Thread: Any.
// Thread: Any. Logs and reports whether the ALC device (context-level) API
// has a pending error. Reading the error also clears the device's error flag.
bool ContextErrorHappened() {
ALenum errCode;
if ((errCode = alcGetError(AudioDevice)) != ALC_NO_ERROR) {
// Log both the numeric code and OpenAL's human-readable string for it.
LOG(("Audio Context Error: %1, %2").arg(errCode).arg((const char *)alcGetString(AudioDevice, errCode)));
return true;
}
return false;
}
// Thread: Any.
// Thread: Any. Logs and reports whether the AL (playback, per-context) API
// has a pending error. Reading the error also clears the thread's error flag.
bool PlaybackErrorHappened() {
ALenum errCode;
if ((errCode = alGetError()) != AL_NO_ERROR) {
// Log both the numeric code and OpenAL's human-readable string for it.
LOG(("Audio Playback Error: %1, %2").arg(errCode).arg((const char *)alGetString(errCode)));
return true;
}
return false;
}
// Diagnostic helper: logs every available playback device and the default
// one. Called when opening the default playback device fails.
void EnumeratePlaybackDevices() {
auto deviceNames = QStringList();
auto devices = alcGetString(nullptr, ALC_DEVICE_SPECIFIER);
t_assert(devices != nullptr);
// alcGetString() returns a packed list of NUL-terminated names,
// terminated by an empty string (double NUL) — walk it manually.
while (*devices != 0) {
auto deviceName8Bit = QByteArray(devices);
auto deviceName = QString::fromLocal8Bit(deviceName8Bit);
deviceNames.append(deviceName);
// Skip past this name and its trailing NUL to the next entry.
devices += deviceName8Bit.size() + 1;
}
LOG(("Audio Playback Devices: %1").arg(deviceNames.join(';')));
if (auto device = alcGetString(nullptr, ALC_DEFAULT_DEVICE_SPECIFIER)) {
LOG(("Audio Playback Default Device: %1").arg(QString::fromLocal8Bit(device)));
} else {
LOG(("Audio Playback Default Device: (null)"));
}
}
// Diagnostic helper: logs every available capture (recording) device and
// the default one. Mirrors EnumeratePlaybackDevices() for the capture side.
void EnumerateCaptureDevices() {
auto deviceNames = QStringList();
auto devices = alcGetString(nullptr, ALC_CAPTURE_DEVICE_SPECIFIER);
t_assert(devices != nullptr);
// Packed list of NUL-terminated names ending with an empty string.
while (*devices != 0) {
auto deviceName8Bit = QByteArray(devices);
auto deviceName = QString::fromLocal8Bit(deviceName8Bit);
deviceNames.append(deviceName);
// Skip past this name and its trailing NUL to the next entry.
devices += deviceName8Bit.size() + 1;
}
LOG(("Audio Capture Devices: %1").arg(deviceNames.join(';')));
if (auto device = alcGetString(nullptr, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {
LOG(("Audio Capture Default Device: %1").arg(QString::fromLocal8Bit(device)));
} else {
LOG(("Audio Capture Default Device: (null)"));
}
}
// Thread: Any. Must be locked: AudioMutex.
// Tears down the OpenAL context first, then the device, resetting both
// globals to null. Safe to call when either (or both) is already null.
void DestroyPlaybackDevice() {
if (AudioContext) {
// Detach the context from the thread before destroying it.
alcMakeContextCurrent(nullptr);
alcDestroyContext(AudioContext);
AudioContext = nullptr;
}
if (AudioDevice) {
alcCloseDevice(AudioDevice);
AudioDevice = nullptr;
}
}
// Thread: Any. Must be locked: AudioMutex.
// Opens the default OpenAL playback device, creates a context on it and
// configures the listener. Returns false (with diagnostics logged) on
// failure; idempotent — returns true immediately if a device already exists.
bool CreatePlaybackDevice() {
if (AudioDevice) return true;
AudioDevice = alcOpenDevice(nullptr);
if (!AudioDevice) {
LOG(("Audio Error: Could not create default playback device, enumerating.."));
// Dump the device list to the log to help diagnose the failure.
EnumeratePlaybackDevices();
return false;
}
// Zero-terminated attribute list: request up to 128 stereo sources.
ALCint attributes[] = { ALC_STEREO_SOURCES, 128, 0 };
AudioContext = alcCreateContext(AudioDevice, attributes);
alcMakeContextCurrent(AudioContext);
if (ContextErrorHappened()) {
// Roll back the partially-initialized device/context pair.
DestroyPlaybackDevice();
return false;
}
// Listener at the origin, facing -Z with +Y up, no velocity; distance
// attenuation disabled — sources play at their set gain regardless of position.
ALfloat v[] = { 0.f, 0.f, -1.f, 0.f, 1.f, 0.f };
alListener3f(AL_POSITION, 0.f, 0.f, 0.f);
alListener3f(AL_VELOCITY, 0.f, 0.f, 0.f);
alListenerfv(AL_ORIENTATION, v);
alDistanceModel(AL_NONE);
return true;
}
struct NotifySound {
QByteArray data;
@ -92,6 +166,7 @@ struct NotifySound {
};
NotifySound DefaultNotify;
// Thread: Main. Must be locked: AudioMutex.
void PrepareNotifySound() {
auto content = ([] {
QFile soundFile(":/gui/art/newmsg.wav");
@ -145,17 +220,17 @@ void PrepareNotifySound() {
auto format = ALenum(0);
switch (bytesPerSample) {
case 1:
switch (numChannels) {
case 1: format = AL_FORMAT_MONO8; break;
case 2: format = AL_FORMAT_STEREO8; break;
}
switch (numChannels) {
case 1: format = AL_FORMAT_MONO8; break;
case 2: format = AL_FORMAT_STEREO8; break;
}
break;
case 2:
switch (numChannels) {
case 1: format = AL_FORMAT_MONO16; break;
case 2: format = AL_FORMAT_STEREO16; break;
}
switch (numChannels) {
case 1: format = AL_FORMAT_MONO16; break;
case 2: format = AL_FORMAT_STEREO16; break;
}
break;
}
t_assert(format != 0);
@ -168,66 +243,6 @@ void PrepareNotifySound() {
DefaultNotify.lengthMs = (numSamples * 1000LL / sampleRate);
}
base::Observable<AudioMsgId> UpdatedObservable;
Mixer *MixerInstance = nullptr;
bool ContextErrorHappened() {
ALenum errCode;
if ((errCode = alcGetError(AudioDevice)) != ALC_NO_ERROR) {
LOG(("Audio Context Error: %1, %2").arg(errCode).arg((const char *)alcGetString(AudioDevice, errCode)));
return true;
}
return false;
}
bool PlaybackErrorHappened() {
ALenum errCode;
if ((errCode = alGetError()) != AL_NO_ERROR) {
LOG(("Audio Playback Error: %1, %2").arg(errCode).arg((const char *)alGetString(errCode)));
return true;
}
return false;
}
void EnumeratePlaybackDevices() {
auto deviceNames = QStringList();
auto devices = alcGetString(nullptr, ALC_DEVICE_SPECIFIER);
t_assert(devices != nullptr);
while (*devices != 0) {
auto deviceName8Bit = QByteArray(devices);
auto deviceName = QString::fromLocal8Bit(deviceName8Bit);
deviceNames.append(deviceName);
devices += deviceName8Bit.size() + 1;
}
LOG(("Audio Playback Devices: %1").arg(deviceNames.join(';')));
if (auto device = alcGetString(nullptr, ALC_DEFAULT_DEVICE_SPECIFIER)) {
LOG(("Audio Playback Default Device: %1").arg(QString::fromLocal8Bit(device)));
} else {
LOG(("Audio Playback Default Device: (null)"));
}
}
void EnumerateCaptureDevices() {
auto deviceNames = QStringList();
auto devices = alcGetString(nullptr, ALC_CAPTURE_DEVICE_SPECIFIER);
t_assert(devices != nullptr);
while (*devices != 0) {
auto deviceName8Bit = QByteArray(devices);
auto deviceName = QString::fromLocal8Bit(deviceName8Bit);
deviceNames.append(deviceName);
devices += deviceName8Bit.size() + 1;
}
LOG(("Audio Capture Devices: %1").arg(deviceNames.join(';')));
if (auto device = alcGetString(nullptr, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {
LOG(("Audio Capture Default Device: %1").arg(QString::fromLocal8Bit(device)));
} else {
LOG(("Audio Capture Default Device: (null)"));
}
}
ALuint CreateSource() {
auto source = ALuint(0);
alGenSources(1, &source);
@ -257,11 +272,11 @@ void CreateDefaultNotify() {
alSourcei(DefaultNotify.source, AL_BUFFER, DefaultNotify.buffer);
}
// can be called at any moment when audio error
void CloseAudioPlaybackDevice() {
// Thread: Main. Must be locked: AudioMutex.
void ClosePlaybackDevice() {
if (!AudioDevice) return;
LOG(("Audio Info: closing audio playback device"));
LOG(("Audio Info: Closing audio playback device."));
if (alIsSource(DefaultNotify.source)) {
alSourceStop(DefaultNotify.source);
alSourcei(DefaultNotify.source, AL_BUFFER, AL_NONE);
@ -271,30 +286,24 @@ void CloseAudioPlaybackDevice() {
DefaultNotify.buffer = 0;
DefaultNotify.source = 0;
if (mixer()) {
mixer()->detachTracks();
if (Player::mixer()) {
Player::mixer()->detachTracks();
}
Current().detachTracks();
if (AudioContext) {
alcMakeContextCurrent(nullptr);
alcDestroyContext(AudioContext);
AudioContext = nullptr;
}
if (AudioDevice) {
alcCloseDevice(AudioDevice);
AudioDevice = nullptr;
}
DestroyPlaybackDevice();
}
} // namespace
void InitAudio() {
// Thread: Main.
void Start() {
t_assert(AudioDevice == nullptr);
qRegisterMetaType<AudioMsgId>();
qRegisterMetaType<VoiceWaveform>();
// No sync required yet.
PrepareNotifySound();
auto loglevel = getenv("ALSOFT_LOGLEVEL");
@ -303,83 +312,89 @@ void InitAudio() {
EnumeratePlaybackDevices();
EnumerateCaptureDevices();
MixerInstance = new Mixer();
MixerInstance = new Player::Mixer();
Platform::Audio::Init();
}
void DeInitAudio() {
// Thread: Main.
void Finish() {
Platform::Audio::DeInit();
delete base::take(MixerInstance);
CloseAudioPlaybackDevice();
// No sync required already.
ClosePlaybackDevice();
}
base::Observable<AudioMsgId> &Updated() {
return UpdatedObservable;
// Thread: Main. Locks: AudioMutex.
// Returns whether an OpenAL playback device is currently open.
// Takes AudioMutex itself, so callers must NOT already hold it.
bool IsAttachedToDevice() {
QMutexLocker lock(&AudioMutex);
return (AudioDevice != nullptr);
}
bool CreateAudioPlaybackDevice() {
if (AudioDevice) return true;
// Thread: Any. Must be locked: AudioMutex.
bool AttachToDevice() {
if (AudioDevice) {
return true;
}
LOG(("Audio Info: recreating audio device and reattaching the tracks"));
AudioDevice = alcOpenDevice(nullptr);
CreatePlaybackDevice();
if (!AudioDevice) {
LOG(("Audio Error: Could not create default playback device, enumerating.."));
EnumeratePlaybackDevices();
return false;
}
ALCint attributes[] = { ALC_STEREO_SOURCES, 8, 0 };
AudioContext = alcCreateContext(AudioDevice, attributes);
alcMakeContextCurrent(AudioContext);
if (ContextErrorHappened()) {
CloseAudioPlaybackDevice();
return false;
if (auto m = Player::mixer()) {
m->reattachTracks();
emit m->faderOnTimer();
}
ALfloat v[] = { 0.f, 0.f, -1.f, 0.f, 1.f, 0.f };
alListener3f(AL_POSITION, 0.f, 0.f, 0.f);
alListener3f(AL_VELOCITY, 0.f, 0.f, 0.f);
alListenerfv(AL_ORIENTATION, v);
alDistanceModel(AL_NONE);
base::TaskQueue::Main().Put([] {
Current().reattachTracks();
});
return true;
}
void DetachFromDeviceByTimer() {
QMutexLocker lock(&AudioMutex);
if (mixer()) {
mixer()->detachFromDeviceByTimer();
}
// Thread-safe trampoline: posts the detach request to the main thread's
// task queue so Current() is only touched from the main thread.
void ScheduleDetachFromDeviceSafe() {
base::TaskQueue::Main().Put([] {
Current().scheduleDetachFromDevice();
});
}
void DetachFromDevice() {
QMutexLocker lock(&AudioMutex);
CloseAudioPlaybackDevice();
if (mixer()) {
mixer()->reattachIfNeeded();
}
// Thread-safe trampoline: asks the main thread to start the
// detach-if-unused countdown on the Audio::Instance.
void ScheduleDetachIfNotUsedSafe() {
base::TaskQueue::Main().Put([] {
Current().scheduleDetachIfNotUsed();
});
}
// Thread-safe trampoline: asks the main thread to cancel a pending
// detach-if-unused countdown (the device is in use again).
void StopDetachIfNotUsedSafe() {
base::TaskQueue::Main().Put([] {
Current().stopDetachIfNotUsed();
});
}
// Thread: Main. Locks: AudioMutex.
void PlayNotify() {
QMutexLocker lock(&AudioMutex);
if (!mixer()) return;
auto m = Player::mixer();
if (!m) return;
mixer()->reattachTracks();
AttachToDevice();
if (!AudioDevice) return;
CreateDefaultNotify();
alSourcePlay(DefaultNotify.source);
if (PlaybackErrorHappened()) {
CloseAudioPlaybackDevice();
ClosePlaybackDevice();
return;
}
emit mixer()->suppressAll();
emit mixer()->faderOnTimer();
emit m->suppressAll();
emit m->faderOnTimer();
}
// Thread: Any. Must be locked: AudioMutex.
bool NotifyIsPlaying() {
if (alIsSource(DefaultNotify.source)) {
ALint state = AL_INITIAL;
@ -391,6 +406,26 @@ bool NotifyIsPlaying() {
return false;
}
} // namespace Audio
namespace Player {
namespace {
constexpr auto kVideoVolumeRound = 10000;
constexpr auto kPreloadSamples = 2LL * 48000; // preload next part if less than 2 seconds remains
constexpr auto kFadeDuration = TimeMs(500);
constexpr auto kCheckPlaybackPositionTimeout = TimeMs(100); // 100ms per check audio position
constexpr auto kCheckPlaybackPositionDelta = 2400LL; // update position called each 2400 samples
constexpr auto kCheckFadingTimeout = TimeMs(7); // 7ms
base::Observable<AudioMsgId> UpdatedObservable;
} // namespace
// Accessor for the global observable that fires when a track's state
// changes; returns a reference to the file-local UpdatedObservable.
base::Observable<AudioMsgId> &Updated() {
return UpdatedObservable;
}
float64 ComputeVolume(AudioMsgId::Type type) {
switch (type) {
case AudioMsgId::Type::Voice: return suppressAllGain;
@ -401,7 +436,7 @@ float64 ComputeVolume(AudioMsgId::Type type) {
}
Mixer *mixer() {
return MixerInstance;
return Audio::MixerInstance;
}
void Mixer::Track::createStream() {
@ -582,6 +617,7 @@ Mixer::Mixer()
_faderThread.start();
}
// Thread: Main. Locks: AudioMutex.
Mixer::~Mixer() {
{
QMutexLocker lock(&AudioMutex);
@ -592,8 +628,8 @@ Mixer::~Mixer() {
}
_videoTrack.clear();
CloseAudioPlaybackDevice();
MixerInstance = nullptr;
Audio::ClosePlaybackDevice();
Audio::MixerInstance = nullptr;
}
_faderThread.quit();
@ -661,12 +697,12 @@ void Mixer::resetFadeStartPosition(AudioMsgId::Type type, int positionInBuffered
if (!track) return;
if (positionInBuffered < 0) {
reattachTracks();
Audio::AttachToDevice();
if (track->isStreamCreated()) {
ALint currentPosition = 0;
alGetSourcei(track->stream.source, AL_SAMPLE_OFFSET, &currentPosition);
if (Media::Player::PlaybackErrorHappened()) {
if (Audio::PlaybackErrorHappened()) {
setStoppedState(track, State::StoppedAtError);
onError(track->state.id);
return;
@ -717,7 +753,7 @@ void Mixer::play(const AudioMsgId &audio, int64 position) {
auto notLoadedYet = false;
{
QMutexLocker lock(&AudioMutex);
reattachTracks();
Audio::AttachToDevice();
if (!AudioDevice) return;
bool fadedStart = false;
@ -906,12 +942,11 @@ void Mixer::resumeFromVideo(uint64 videoPlayId) {
case State::Pausing:
case State::Paused:
case State::PausedAtEnd: {
reattachTracks();
if (track->state.state == State::Paused) {
// This calls reattachTracks().
// This calls Audio::AttachToDevice().
resetFadeStartPosition(type);
} else {
reattachTracks();
Audio::AttachToDevice();
if (track->state.state == State::PausedAtEnd) {
if (track->isStreamCreated()) {
alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL));
@ -984,7 +1019,7 @@ void Mixer::videoSoundProgress(const AudioMsgId &audio) {
}
bool Mixer::checkCurrentALError(AudioMsgId::Type type) {
if (!Media::Player::PlaybackErrorHappened()) return true;
if (!Audio::PlaybackErrorHappened()) return true;
auto data = trackForType(type);
if (!data) {
@ -1003,7 +1038,7 @@ void Mixer::pauseresume(AudioMsgId::Type type, bool fast) {
case State::Pausing:
case State::Paused:
case State::PausedAtEnd: {
reattachTracks();
Audio::AttachToDevice();
if (current->state.state == State::Paused) {
resetFadeStartPosition(type);
} else if (current->state.state == State::PausedAtEnd) {
@ -1051,7 +1086,7 @@ void Mixer::seek(AudioMsgId::Type type, int64 position) {
auto current = trackForType(type);
auto audio = current->state.id;
reattachTracks();
Audio::AttachToDevice();
auto streamCreated = current->isStreamCreated();
auto fastSeek = (position >= current->bufferedPosition && position < current->bufferedPosition + current->bufferedLength - (current->loaded ? 0 : kDefaultFrequency));
if (!streamCreated) {
@ -1183,10 +1218,7 @@ void Mixer::clearStoppedAtStart(const AudioMsgId &audio) {
}
}
void Mixer::detachFromDeviceByTimer() {
QMetaObject::invokeMethod(_fader, "onDetachFromDeviceByTimer", Qt::QueuedConnection, Q_ARG(bool, true));
}
// Thread: Main. Must be locked: AudioMutex.
void Mixer::detachTracks() {
for (auto i = 0; i != kTogetherLimit; ++i) {
trackForType(AudioMsgId::Type::Voice, i)->detach();
@ -1195,8 +1227,9 @@ void Mixer::detachTracks() {
_videoTrack.detach();
}
// Thread: Main. Must be locked: AudioMutex.
void Mixer::reattachIfNeeded() {
_fader->keepAttachedToDevice();
Audio::Current().stopDetachIfNotUsed();
auto reattachNeeded = [this] {
auto isPlayingState = [](const Track &track) {
@ -1216,24 +1249,18 @@ void Mixer::reattachIfNeeded() {
return isPlayingState(_videoTrack);
};
if (reattachNeeded()) {
reattachTracks();
if (reattachNeeded() || Audio::Current().hasActiveTracks()) {
Audio::AttachToDevice();
}
}
// Thread: Any. Must be locked: AudioMutex.
void Mixer::reattachTracks() {
if (!AudioDevice) {
LOG(("Audio Info: recreating audio device and reattaching the tracks"));
CreateAudioPlaybackDevice();
for (auto i = 0; i != kTogetherLimit; ++i) {
trackForType(AudioMsgId::Type::Voice, i)->reattach(AudioMsgId::Type::Voice);
trackForType(AudioMsgId::Type::Song, i)->reattach(AudioMsgId::Type::Song);
}
_videoTrack.reattach(AudioMsgId::Type::Video);
emit faderOnTimer();
for (auto i = 0; i != kTogetherLimit; ++i) {
trackForType(AudioMsgId::Type::Voice, i)->reattach(AudioMsgId::Type::Voice);
trackForType(AudioMsgId::Type::Song, i)->reattach(AudioMsgId::Type::Song);
}
_videoTrack.reattach(AudioMsgId::Type::Video);
}
void Mixer::setVideoVolume(float64 volume) {
@ -1250,15 +1277,11 @@ Fader::Fader(QThread *thread) : QObject()
, _suppressSongGain(1., 1.) {
moveToThread(thread);
_timer.moveToThread(thread);
_detachFromDeviceTimer.moveToThread(thread);
connect(thread, SIGNAL(started()), this, SLOT(onInit()));
connect(thread, SIGNAL(finished()), this, SLOT(deleteLater()));
_timer.setSingleShot(true);
connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimer()));
_detachFromDeviceTimer.setSingleShot(true);
connect(&_detachFromDeviceTimer, SIGNAL(timeout()), this, SLOT(onDetachFromDeviceTimer()));
}
void Fader::onInit() {
@ -1273,7 +1296,7 @@ void Fader::onTimer() {
auto ms = getms();
auto wasSong = suppressSongGain;
if (_suppressAll) {
auto notifyLengthMs = Media::Player::DefaultNotify.lengthMs;
auto notifyLengthMs = Audio::DefaultNotify.lengthMs;
auto wasAudio = suppressAllGain;
if (ms >= _suppressAllStart + notifyLengthMs || ms < _suppressAllStart) {
_suppressAll = _suppressAllAnim = false;
@ -1328,17 +1351,18 @@ void Fader::onTimer() {
_songVolumeChanged = _videoVolumeChanged = false;
if (!hasFading && !hasPlaying && Media::Player::NotifyIsPlaying()) {
if (!hasFading && !hasPlaying && Audio::NotifyIsPlaying()) {
hasPlaying = true;
}
if (hasFading) {
_timer.start(kCheckFadingTimeout);
keepAttachedToDevice();
Audio::StopDetachIfNotUsedSafe();
} else if (hasPlaying) {
_timer.start(kCheckPlaybackPositionTimeout);
keepAttachedToDevice();
Audio::StopDetachIfNotUsedSafe();
} else {
onDetachFromDeviceByTimer(false);
LOG(("SCHEDULE DETACHED"));
Audio::ScheduleDetachIfNotUsedSafe();
}
}
@ -1346,7 +1370,7 @@ int32 Fader::updateOnePlayback(Mixer::Track *track, bool &hasPlaying, bool &hasF
bool playing = false, fading = false;
auto errorHappened = [this, track] {
if (PlaybackErrorHappened()) {
if (Audio::PlaybackErrorHappened()) {
setStoppedState(track, State::StoppedAtError);
return true;
}
@ -1466,14 +1490,6 @@ void Fader::setStoppedState(Mixer::Track *track, State state) {
track->state.position = 0;
}
void Fader::onDetachFromDeviceTimer() {
QMutexLocker lock(&_detachFromDeviceMutex);
_detachFromDeviceForce = false;
lock.unlock();
DetachFromDevice();
}
void Fader::onSuppressSong() {
if (!_suppressSong) {
_suppressSong = true;
@ -1511,64 +1527,58 @@ void Fader::onVideoVolumeChanged() {
onTimer();
}
void Fader::keepAttachedToDevice() {
QMutexLocker lock(&_detachFromDeviceMutex);
if (!_detachFromDeviceForce) {
_detachFromDeviceTimer.stop();
}
}
void Fader::onDetachFromDeviceByTimer(bool force) {
QMutexLocker lock(&_detachFromDeviceMutex);
if (force) {
_detachFromDeviceForce = true;
}
if (!_detachFromDeviceTimer.isActive()) {
_detachFromDeviceTimer.start(kDetachDeviceTimeout);
}
}
} // namespace Player
} // namespace Media
namespace internal {
// Thread: Any.
// Exposes the file-local AudioMutex to other translation units that need
// to lock around the audio globals.
QMutex *audioPlayerMutex() {
return &AudioMutex;
}
// Thread: Any.
bool audioCheckError() {
return !Media::Player::PlaybackErrorHappened();
return !Audio::PlaybackErrorHappened();
}
// Thread: Any. Must be locked: AudioMutex.
bool audioDeviceIsConnected() {
if (!AudioDevice) {
return false;
}
ALint connected = 0;
alcGetIntegerv(AudioDevice, ALC_CONNECTED, 1, &connected);
if (Media::Player::ContextErrorHappened()) {
auto isConnected = ALint(0);
alcGetIntegerv(AudioDevice, ALC_CONNECTED, 1, &isConnected);
if (Audio::ContextErrorHappened()) {
return false;
}
return (connected != 0);
return (isConnected != 0);
}
// Thread: Any. Must be locked: AudioMutex.
bool CheckAudioDeviceConnected() {
if (audioDeviceIsConnected()) {
return true;
}
if (auto mixer = Media::Player::mixer()) {
mixer->detachFromDeviceByTimer();
}
Audio::ScheduleDetachFromDeviceSafe();
return false;
}
// Thread: Main. Locks: AudioMutex.
// Closes the playback device under AudioMutex, then lets the mixer decide
// whether it needs to reattach (e.g. if tracks are still playing).
void DetachFromDevice() {
QMutexLocker lock(&AudioMutex);
Audio::ClosePlaybackDevice();
if (mixer()) {
mixer()->reattachIfNeeded();
}
}
} // namespace internal
} // namespace Player
} // namespace Media
class FFMpegAttributesReader : public AbstractFFMpegLoader {
public:
FFMpegAttributesReader(const FileLocation &file, const QByteArray &data) : AbstractFFMpegLoader(file, data) {
FFMpegAttributesReader(const FileLocation &file, const QByteArray &data) : AbstractFFMpegLoader(file, data, base::byte_vector()) {
}
bool open(qint64 &position) override {
@ -1581,7 +1591,7 @@ public:
int videoStreamId = av_find_best_stream(fmtContext, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
if (videoStreamId >= 0) {
DEBUG_LOG(("Audio Read Error: Found video stream in file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(videoStreamId).arg(av_make_error_string(err, sizeof(err), streamId)));
DEBUG_LOG(("Audio Read Error: Found video stream in file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(videoStreamId).arg(av_make_error_string(err, sizeof(err), streamId)));
return false;
}
@ -1687,7 +1697,7 @@ FileLoadTask::Song PrepareForSending(const QString &fname, const QByteArray &dat
class FFMpegWaveformCounter : public FFMpegLoader {
public:
FFMpegWaveformCounter(const FileLocation &file, const QByteArray &data) : FFMpegLoader(file, data) {
FFMpegWaveformCounter(const FileLocation &file, const QByteArray &data) : FFMpegLoader(file, data, base::byte_vector()) {
}
bool open(qint64 &position) override {

View File

@ -26,6 +26,28 @@ struct VideoSoundData;
struct VideoSoundPart;
namespace Media {
namespace Audio {
// Thread: Main.
void Start();
void Finish();
// Thread: Main. Locks: AudioMutex.
bool IsAttachedToDevice();
// Thread: Any. Must be locked: AudioMutex.
bool AttachToDevice();
// Thread: Any.
void ScheduleDetachFromDeviceSafe();
void ScheduleDetachIfNotUsedSafe();
void StopDetachIfNotUsedSafe();
// Thread: Main.
void PlayNotify();
} // namespace Audio
namespace Player {
constexpr auto kDefaultFrequency = 48000; // 48 kHz
@ -35,13 +57,7 @@ constexpr auto kWaveformSamplesCount = 100;
class Fader;
class Loaders;
void InitAudio();
void DeInitAudio();
base::Observable<AudioMsgId> &Updated();
void DetachFromDeviceByTimer();
void PlayNotify();
float64 ComputeVolume(AudioMsgId::Type type);
@ -117,12 +133,16 @@ public:
void clearStoppedAtStart(const AudioMsgId &audio);
void detachFromDeviceByTimer();
// Thread: Main. Must be locked: AudioMutex.
void detachTracks();
// Thread: Main. Must be locked: AudioMutex.
void reattachIfNeeded();
// Thread: Any. Must be locked: AudioMutex.
void reattachTracks();
// Thread safe.
// Thread: Any.
void setVideoVolume(float64 volume);
float64 getVideoVolume() const;
@ -236,7 +256,6 @@ class Fader : public QObject {
public:
Fader(QThread *thread);
void keepAttachedToDevice();
signals:
void error(const AudioMsgId &audio);
@ -245,11 +264,8 @@ signals:
void needToPreload(const AudioMsgId &audio);
public slots:
void onDetachFromDeviceByTimer(bool force);
void onInit();
void onTimer();
void onDetachFromDeviceTimer();
void onSuppressSong();
void onUnsuppressSong();
@ -279,25 +295,27 @@ private:
TimeMs _suppressAllStart = 0;
TimeMs _suppressSongStart = 0;
QTimer _detachFromDeviceTimer;
QMutex _detachFromDeviceMutex;
bool _detachFromDeviceForce = false;
};
FileLoadTask::Song PrepareForSending(const QString &fname, const QByteArray &data);
} // namespace Player
} // namespace Media
namespace internal {
QMutex *audioPlayerMutex();
bool audioCheckError();
// AudioMutex must be locked.
// Thread: Any. Must be locked: AudioMutex.
bool CheckAudioDeviceConnected();
// Thread: Main. Locks: AudioMutex.
void DetachFromDevice();
// Thread: Any.
QMutex *audioPlayerMutex();
// Thread: Any.
bool audioCheckError();
} // namespace internal
} // namespace Player
} // namespace Media
VoiceWaveform audioCountWaveform(const FileLocation &file, const QByteArray &data);

View File

@ -24,8 +24,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
#include <AL/al.h>
#include <AL/alc.h>
#define AL_ALEXT_PROTOTYPES
#include <AL/alext.h>
#include <numeric>
@ -51,13 +49,13 @@ bool ErrorHappened(ALCdevice *device) {
} // namespace
void Init() {
void Start() {
t_assert(CaptureInstance == nullptr);
CaptureInstance = new Instance();
instance()->check();
}
void DeInit() {
void Finish() {
delete base::take(CaptureInstance);
}

View File

@ -25,8 +25,8 @@ struct AVFrame;
namespace Media {
namespace Capture {
void Init();
void DeInit();
void Start();
void Finish();
class Instance : public QObject {
Q_OBJECT

View File

@ -33,14 +33,16 @@ bool AbstractFFMpegLoader::open(qint64 &position) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
ioBuffer = (uchar*)av_malloc(AVBlockSize);
if (data.isEmpty()) {
ioContext = avio_alloc_context(ioBuffer, AVBlockSize, 0, reinterpret_cast<void*>(this), &AbstractFFMpegLoader::_read_file, 0, &AbstractFFMpegLoader::_seek_file);
} else {
if (!_data.isEmpty()) {
ioContext = avio_alloc_context(ioBuffer, AVBlockSize, 0, reinterpret_cast<void*>(this), &AbstractFFMpegLoader::_read_data, 0, &AbstractFFMpegLoader::_seek_data);
} else if (!_bytes.empty()) {
ioContext = avio_alloc_context(ioBuffer, AVBlockSize, 0, reinterpret_cast<void*>(this), &AbstractFFMpegLoader::_read_bytes, 0, &AbstractFFMpegLoader::_seek_bytes);
} else {
ioContext = avio_alloc_context(ioBuffer, AVBlockSize, 0, reinterpret_cast<void*>(this), &AbstractFFMpegLoader::_read_file, 0, &AbstractFFMpegLoader::_seek_file);
}
fmtContext = avformat_alloc_context();
if (!fmtContext) {
DEBUG_LOG(("Audio Read Error: Unable to avformat_alloc_context for file '%1', data size '%2'").arg(file.name()).arg(data.size()));
DEBUG_LOG(("Audio Read Error: Unable to avformat_alloc_context for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
return false;
}
fmtContext->pb = ioContext;
@ -48,19 +50,19 @@ bool AbstractFFMpegLoader::open(qint64 &position) {
if ((res = avformat_open_input(&fmtContext, 0, 0, 0)) < 0) {
ioBuffer = 0;
DEBUG_LOG(("Audio Read Error: Unable to avformat_open_input for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
DEBUG_LOG(("Audio Read Error: Unable to avformat_open_input for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
_opened = true;
if ((res = avformat_find_stream_info(fmtContext, 0)) < 0) {
DEBUG_LOG(("Audio Read Error: Unable to avformat_find_stream_info for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
DEBUG_LOG(("Audio Read Error: Unable to avformat_find_stream_info for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
streamId = av_find_best_stream(fmtContext, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
if (streamId < 0) {
LOG(("Audio Error: Unable to av_find_best_stream for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(streamId).arg(av_make_error_string(err, sizeof(err), streamId)));
LOG(("Audio Error: Unable to av_find_best_stream for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(streamId).arg(av_make_error_string(err, sizeof(err), streamId)));
return false;
}
@ -88,59 +90,92 @@ AbstractFFMpegLoader::~AbstractFFMpegLoader() {
}
int AbstractFFMpegLoader::_read_data(void *opaque, uint8_t *buf, int buf_size) {
AbstractFFMpegLoader *l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
auto l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
int32 nbytes = qMin(l->data.size() - l->dataPos, int32(buf_size));
auto nbytes = qMin(l->_data.size() - l->_dataPos, int32(buf_size));
if (nbytes <= 0) {
return 0;
}
memcpy(buf, l->data.constData() + l->dataPos, nbytes);
l->dataPos += nbytes;
memcpy(buf, l->_data.constData() + l->_dataPos, nbytes);
l->_dataPos += nbytes;
return nbytes;
}
int64_t AbstractFFMpegLoader::_seek_data(void *opaque, int64_t offset, int whence) {
AbstractFFMpegLoader *l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
auto l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
int32 newPos = -1;
switch (whence) {
case SEEK_SET: newPos = offset; break;
case SEEK_CUR: newPos = l->dataPos + offset; break;
case SEEK_END: newPos = l->data.size() + offset; break;
case SEEK_CUR: newPos = l->_dataPos + offset; break;
case SEEK_END: newPos = l->_data.size() + offset; break;
case AVSEEK_SIZE: {
// Special whence for determining filesize without any seek.
return l->data.size();
return l->_data.size();
} break;
}
if (newPos < 0 || newPos > l->data.size()) {
if (newPos < 0 || newPos > l->_data.size()) {
return -1;
}
l->dataPos = newPos;
return l->dataPos;
l->_dataPos = newPos;
return l->_dataPos;
}
// FFmpeg AVIO read callback for the byte-vector data source. Copies up to
// buf_size bytes from _bytes starting at _dataPos; returns the count copied,
// or 0 at end of data. `opaque` is the loader instance, as registered with
// avio_alloc_context.
int AbstractFFMpegLoader::_read_bytes(void *opaque, uint8_t *buf, int buf_size) {
auto l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
// Clamp to the remaining bytes; may be negative if _dataPos is past the end.
auto nbytes = qMin(static_cast<int>(l->_bytes.size()) - l->_dataPos, buf_size);
if (nbytes <= 0) {
return 0;
}
memcpy(buf, l->_bytes.data() + l->_dataPos, nbytes);
l->_dataPos += nbytes;
return nbytes;
}
// FFmpeg AVIO seek callback for the byte-vector data source. Supports the
// standard SEEK_SET/CUR/END plus FFmpeg's AVSEEK_SIZE query. Returns the
// new position, or -1 if the target is out of range.
// NOTE(review): positions are kept in int32 while _bytes.size() is size_t —
// presumably audio payloads stay well under 2 GB; confirm against callers.
int64_t AbstractFFMpegLoader::_seek_bytes(void *opaque, int64_t offset, int whence) {
auto l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
int32 newPos = -1;
switch (whence) {
case SEEK_SET: newPos = offset; break;
case SEEK_CUR: newPos = l->_dataPos + offset; break;
case SEEK_END: newPos = static_cast<int>(l->_bytes.size()) + offset; break;
case AVSEEK_SIZE: {
// Special whence for determining filesize without any seek.
return l->_bytes.size();
} break;
}
if (newPos < 0 || newPos > l->_bytes.size()) {
return -1;
}
l->_dataPos = newPos;
return l->_dataPos;
}
int AbstractFFMpegLoader::_read_file(void *opaque, uint8_t *buf, int buf_size) {
AbstractFFMpegLoader *l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
return int(l->f.read((char*)(buf), buf_size));
auto l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
return int(l->_f.read((char*)(buf), buf_size));
}
int64_t AbstractFFMpegLoader::_seek_file(void *opaque, int64_t offset, int whence) {
AbstractFFMpegLoader *l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
auto l = reinterpret_cast<AbstractFFMpegLoader*>(opaque);
switch (whence) {
case SEEK_SET: return l->f.seek(offset) ? l->f.pos() : -1;
case SEEK_CUR: return l->f.seek(l->f.pos() + offset) ? l->f.pos() : -1;
case SEEK_END: return l->f.seek(l->f.size() + offset) ? l->f.pos() : -1;
case SEEK_SET: return l->_f.seek(offset) ? l->_f.pos() : -1;
case SEEK_CUR: return l->_f.seek(l->_f.pos() + offset) ? l->_f.pos() : -1;
case SEEK_END: return l->_f.seek(l->_f.size() + offset) ? l->_f.pos() : -1;
case AVSEEK_SIZE: {
// Special whence for determining filesize without any seek.
return l->f.size();
return l->_f.size();
} break;
}
return -1;
}
FFMpegLoader::FFMpegLoader(const FileLocation &file, const QByteArray &data) : AbstractFFMpegLoader(file, data) {
FFMpegLoader::FFMpegLoader(const FileLocation &file, const QByteArray &data, base::byte_vector &&bytes) : AbstractFFMpegLoader(file, data, std::move(bytes)) {
frame = av_frame_alloc();
}
@ -156,18 +191,18 @@ bool FFMpegLoader::open(qint64 &position) {
codecContext = avcodec_alloc_context3(nullptr);
if (!codecContext) {
LOG(("Audio Error: Unable to avcodec_alloc_context3 for file '%1', data size '%2'").arg(file.name()).arg(data.size()));
LOG(("Audio Error: Unable to avcodec_alloc_context3 for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
return false;
}
if ((res = avcodec_parameters_to_context(codecContext, codecParams)) < 0) {
LOG(("Audio Error: Unable to avcodec_parameters_to_context for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to avcodec_parameters_to_context for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
av_codec_set_pkt_timebase(codecContext, fmtContext->streams[streamId]->time_base);
av_opt_set_int(codecContext, "refcounted_frames", 1, 0);
if ((res = avcodec_open2(codecContext, codec, 0)) < 0) {
LOG(("Audio Error: Unable to avcodec_open2 for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to avcodec_open2 for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
@ -213,7 +248,7 @@ bool FFMpegLoader::open(qint64 &position) {
if (sampleSize < 0) {
swrContext = swr_alloc();
if (!swrContext) {
LOG(("Audio Error: Unable to swr_alloc for file '%1', data size '%2'").arg(file.name()).arg(data.size()));
LOG(("Audio Error: Unable to swr_alloc for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
return false;
}
int64_t src_ch_layout = layout, dst_ch_layout = AudioToChannelLayout;
@ -229,7 +264,7 @@ bool FFMpegLoader::open(qint64 &position) {
av_opt_set_sample_fmt(swrContext, "out_sample_fmt", dst_sample_fmt, 0);
if ((res = swr_init(swrContext)) < 0) {
LOG(("Audio Error: Unable to swr_init for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to swr_init for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
@ -241,7 +276,7 @@ bool FFMpegLoader::open(qint64 &position) {
maxResampleSamples = av_rescale_rnd(AVBlockSize / sampleSize, dstRate, srcRate, AV_ROUND_UP);
if ((res = av_samples_alloc_array_and_samples(&dstSamplesData, 0, AudioToChannels, maxResampleSamples, AudioToFormat, 0)) < 0) {
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
}
@ -269,14 +304,14 @@ AudioPlayerLoader::ReadResult FFMpegLoader::readMore(QByteArray &result, int64 &
return ReadResult::EndOfFile;
} else if (res != AVERROR(EAGAIN)) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to avcodec_receive_frame() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to avcodec_receive_frame() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
if ((res = av_read_frame(fmtContext, &avpkt)) < 0) {
if (res != AVERROR_EOF) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to av_read_frame() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to av_read_frame() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
avcodec_send_packet(codecContext, nullptr); // drain
@ -289,7 +324,7 @@ AudioPlayerLoader::ReadResult FFMpegLoader::readMore(QByteArray &result, int64 &
av_packet_unref(&avpkt);
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
// There is a sample voice message where skipping such packet
// results in a crash (read_access to nullptr) in swr_convert().
//if (res == AVERROR_INVALIDDATA) {
@ -312,13 +347,13 @@ AudioPlayerLoader::ReadResult FFMpegLoader::readFromReadyFrame(QByteArray &resul
av_freep(&dstSamplesData[0]);
if ((res = av_samples_alloc(dstSamplesData, 0, AudioToChannels, maxResampleSamples, AudioToFormat, 1)) < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
}
if ((res = swr_convert(swrContext, dstSamplesData, dstSamples, (const uint8_t**)frame->extended_data, frame->nb_samples)) < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to swr_convert for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to swr_convert for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
int32 resultLen = av_samples_get_buffer_size(0, AudioToChannels, res, AudioToFormat, 1);

View File

@ -34,7 +34,7 @@ extern "C" {
class AbstractFFMpegLoader : public AudioPlayerLoader {
public:
AbstractFFMpegLoader(const FileLocation &file, const QByteArray &data) : AudioPlayerLoader(file, data) {
AbstractFFMpegLoader(const FileLocation &file, const QByteArray &data, base::byte_vector &&bytes) : AudioPlayerLoader(file, data, std::move(bytes)) {
}
bool open(qint64 &position) override;
@ -64,6 +64,8 @@ protected:
private:
static int _read_data(void *opaque, uint8_t *buf, int buf_size);
static int64_t _seek_data(void *opaque, int64_t offset, int whence);
static int _read_bytes(void *opaque, uint8_t *buf, int buf_size);
static int64_t _seek_bytes(void *opaque, int64_t offset, int whence);
static int _read_file(void *opaque, uint8_t *buf, int buf_size);
static int64_t _seek_file(void *opaque, int64_t offset, int whence);
@ -71,7 +73,7 @@ private:
class FFMpegLoader : public AbstractFFMpegLoader {
public:
FFMpegLoader(const FileLocation &file, const QByteArray &data);
FFMpegLoader(const FileLocation &file, const QByteArray &data, base::byte_vector &&bytes);
bool open(qint64 &position) override;

View File

@ -20,20 +20,21 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#include "media/media_audio_loader.h"
AudioPlayerLoader::AudioPlayerLoader(const FileLocation &file, const QByteArray &data)
: file(file)
, data(data) {
AudioPlayerLoader::AudioPlayerLoader(const FileLocation &file, const QByteArray &data, base::byte_vector &&bytes)
: _file(file)
, _data(data)
, _bytes(std::move(bytes)) {
}
AudioPlayerLoader::~AudioPlayerLoader() {
if (access) {
file.accessDisable();
access = false;
if (_access) {
_file.accessDisable();
_access = false;
}
}
bool AudioPlayerLoader::check(const FileLocation &file, const QByteArray &data) {
return this->file == file && this->data.size() == data.size();
return this->_file == file && this->_data.size() == data.size();
}
void AudioPlayerLoader::saveDecodedSamples(QByteArray *samples, int64 *samplesCount) {
@ -59,21 +60,21 @@ bool AudioPlayerLoader::holdsSavedDecodedSamples() const {
}
bool AudioPlayerLoader::openFile() {
if (data.isEmpty()) {
if (f.isOpen()) f.close();
if (!access) {
if (!file.accessEnable()) {
LOG(("Audio Error: could not open file access '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(f.error()).arg(f.errorString()));
if (_data.isEmpty() && _bytes.empty()) {
if (_f.isOpen()) _f.close();
if (!_access) {
if (!_file.accessEnable()) {
LOG(("Audio Error: could not open file access '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(_f.error()).arg(_f.errorString()));
return false;
}
access = true;
_access = true;
}
f.setFileName(file.name());
if (!f.open(QIODevice::ReadOnly)) {
LOG(("Audio Error: could not open file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(f.error()).arg(f.errorString()));
_f.setFileName(_file.name());
if (!_f.open(QIODevice::ReadOnly)) {
LOG(("Audio Error: could not open file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(_f.error()).arg(_f.errorString()));
return false;
}
}
dataPos = 0;
_dataPos = 0;
return true;
}

View File

@ -22,7 +22,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
class AudioPlayerLoader {
public:
AudioPlayerLoader(const FileLocation &file, const QByteArray &data);
AudioPlayerLoader(const FileLocation &file, const QByteArray &data, base::byte_vector &&bytes);
virtual ~AudioPlayerLoader();
virtual bool check(const FileLocation &file, const QByteArray &data);
@ -46,12 +46,13 @@ public:
bool holdsSavedDecodedSamples() const;
protected:
FileLocation file;
bool access = false;
QByteArray data;
FileLocation _file;
bool _access = false;
QByteArray _data;
base::byte_vector _bytes;
QFile f;
int32 dataPos = 0;
QFile _f;
int _dataPos = 0;
bool openFile();

View File

@ -195,7 +195,7 @@ void Loaders::loadData(AudioMsgId audio, qint64 position) {
}
if (started) {
mixer()->reattachTracks();
Audio::AttachToDevice();
track->started();
if (!internal::audioCheckError()) {
@ -329,7 +329,7 @@ AudioPlayerLoader *Loaders::setupLoader(const AudioMsgId &audio, SetupError &err
_videoLoader = std::make_unique<ChildFFMpegLoader>(track->videoPlayId, std::move(track->videoData));
l = _videoLoader.get();
} else {
*loader = std::make_unique<FFMpegLoader>(track->file, track->data);
*loader = std::make_unique<FFMpegLoader>(track->file, track->data, base::byte_vector());
l = loader->get();
}

View File

@ -0,0 +1,313 @@
/*
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org
Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#include "media/media_audio_track.h"
#include "media/media_audio_ffmpeg_loader.h"
#include "media/media_audio.h"
#include "messenger.h"
#include <AL/al.h>
#include <AL/alc.h>
#include <AL/alext.h>
namespace Media {
namespace Audio {
namespace {
constexpr auto kMaxFileSize = 10 * 1024 * 1024;
constexpr auto kDetachDeviceTimeout = TimeMs(500); // destroy the audio device after 500ms of silence
// Generates a fresh OpenAL source configured with neutral pitch and
// gain and a zero position / velocity.
ALuint CreateSource() {
	ALuint result = 0;
	alGenSources(1, &result);
	alSourcef(result, AL_PITCH, 1.f);
	alSourcef(result, AL_GAIN, 1.f);
	alSource3f(result, AL_POSITION, 0, 0, 0);
	alSource3f(result, AL_VELOCITY, 0, 0, 0);
	return result;
}
// Generates a fresh OpenAL buffer handle.
ALuint CreateBuffer() {
	ALuint result = 0;
	alGenBuffers(1, &result);
	return result;
}
} // namespace
// Tracks register themselves with the owning Instance so the update
// and detach-from-device timers can reach every live track.
Track::Track(gsl::not_null<Instance*> instance) : _instance(instance) {
	_instance->registerTrack(this);
}
// Decodes the whole audio stream from |data| into _samples and
// remembers the OpenAL format, duration and sample rate.
// Sets _failed if opening or decoding goes wrong.
void Track::fillFromData(base::byte_vector &&data) {
	FFMpegLoader loader(FileLocation(), QByteArray(), std::move(data));

	auto position = qint64(0);
	if (!loader.open(position)) {
		_failed = true;
		return;
	}

	using Result = AudioPlayerLoader::ReadResult;
	while (true) {
		auto chunk = QByteArray();
		auto addedSamples = int64(0);
		auto readResult = loader.readMore(chunk, addedSamples);
		if (addedSamples > 0) {
			auto chunkBytes = reinterpret_cast<const gsl::byte*>(chunk.constData());
			_samplesCount += addedSamples;
			_samples.insert(_samples.end(), chunkBytes, chunkBytes + chunk.size());
		}
		if (readResult == Result::Ok) {
			continue;
		}
		// NotYet / Wait are not expected for an in-memory stream, so
		// anything except a clean end-of-file counts as a failure.
		if (readResult == Result::Error
			|| readResult == Result::NotYet
			|| readResult == Result::Wait) {
			_failed = true;
		}
		break;
	}

	_alFormat = loader.format();
	_lengthMs = loader.duration();
	_sampleRate = loader.frequency();
}
// Enables access to |location| for the duration of the decode and
// delegates to the QString overload. Sets _failed if access fails.
void Track::fillFromFile(const FileLocation &location) {
	if (!location.accessEnable()) {
		LOG(("Track Error: Could not enable access to file '%1'.").arg(location.name()));
		_failed = true;
		return;
	}
	fillFromFile(location.name());
	location.accessDisable();
}
// Reads the whole file (at most kMaxFileSize bytes) into memory and
// decodes it through fillFromData(). Sets _failed on any error.
void Track::fillFromFile(const QString &filePath) {
	QFile f(filePath);
	if (!f.open(QIODevice::ReadOnly)) {
		LOG(("Track Error: Could not open file '%1'.").arg(filePath));
		_failed = true;
		return;
	}
	auto size = f.size();
	if (size <= 0 || size > kMaxFileSize) {
		LOG(("Track Error: Bad file '%1' size: %2.").arg(filePath).arg(size));
		_failed = true;
		return;
	}
	auto bytes = base::byte_vector(size);
	if (f.read(reinterpret_cast<char*>(bytes.data()), bytes.size()) != bytes.size()) {
		LOG(("Track Error: Could not read %1 bytes from file '%2'.").arg(bytes.size()).arg(filePath));
		_failed = true;
		return;
	}
	fillFromData(std::move(bytes));
}
// Starts a single non-looping playback. A failed or empty track
// immediately notifies trackFinished instead of playing.
void Track::playOnce() {
	if (failed() || _samples.empty()) {
		_instance->trackFinished().notify(this, true);
		return;
	}
	createSource();
	alSourceStop(_alSource);
	_looping = false;
	alSourcei(_alSource, AL_LOOPING, 0);
	_active = true;
	alSourcePlay(_alSource);

	// NOTE(review): presumably wakes the player fader so it keeps
	// processing audio state — confirm against the mixer code.
	emit Media::Player::mixer()->faderOnTimer();
}
// Starts looped playback. Unlike playOnce(), a failed track is
// silently ignored (no trackFinished notification).
void Track::playInLoop() {
	if (failed()) {
		return;
	}
	createSource();
	alSourceStop(_alSource);
	_looping = true;
	alSourcei(_alSource, AL_LOOPING, 1);
	_active = true;
	alSourcePlay(_alSource);

	// NOTE(review): presumably wakes the player fader so it keeps
	// processing audio state — confirm against the mixer code.
	emit Media::Player::mixer()->faderOnTimer();
}
// Lazily creates the OpenAL source and buffer, uploading the decoded
// samples. Sets _failed if the audio device cannot be attached.
void Track::createSource() {
	if (alIsSource(_alSource)) {
		return; // Already created.
	}
	{
		// Attaching to the device is guarded by the player mutex.
		QMutexLocker lock(Player::internal::audioPlayerMutex());
		if (!AttachToDevice()) {
			_failed = true;
			return;
		}
	}
	_alSource = CreateSource();
	_alBuffer = CreateBuffer();

	alBufferData(_alBuffer, _alFormat, _samples.data(), _samples.size(), _sampleRate);
	alSourcei(_alSource, AL_BUFFER, _alBuffer);
}
// Synchronizes _active / _alPosition with the real OpenAL source
// state and fires trackFinished when a one-shot playback stops.
// Called from the Instance update timer and from detachFromDevice().
void Track::updateState() {
	if (!isActive() || !alIsSource(_alSource)) {
		return;
	}
	auto state = ALint(0);
	alGetSourcei(_alSource, AL_SOURCE_STATE, &state);
	if (state != AL_PLAYING) {
		_alPosition = 0;
		if (_active) {
			_active = false;
			if (!_looping) {
				// One-shot playback finished.
				_instance->trackFinished().notify(this, true);
			}
		}
	} else {
		// Remember position so a detach / reattach cycle can resume.
		auto currentPosition = ALint(0);
		alGetSourcei(_alSource, AL_SAMPLE_OFFSET, &currentPosition);
		_alPosition = currentPosition;
	}
}
// Frees the OpenAL source and buffer. updateState() is called first
// so the current playback position is saved into _alPosition and can
// be restored by reattachToDevice().
void Track::detachFromDevice() {
	if (alIsSource(_alSource)) {
		updateState();
		alSourceStop(_alSource);
		alSourcei(_alSource, AL_BUFFER, AL_NONE);
		alDeleteBuffers(1, &_alBuffer);
		alDeleteSources(1, &_alSource);
	}
	_alBuffer = 0;
	_alSource = 0;
}
// Recreates the OpenAL source after a device reattach and resumes
// playback from the saved _alPosition. No-op for inactive tracks or
// when the source still exists.
void Track::reattachToDevice() {
	if (!isActive() || alIsSource(_alSource)) {
		return;
	}
	createSource();

	alSourcei(_alSource, AL_LOOPING, _looping ? 1 : 0);
	alSourcei(_alSource, AL_SAMPLE_OFFSET, static_cast<ALint>(_alPosition));
	alSourcePlay(_alSource);
}
// True while the track is playing: set in playOnce / playInLoop and
// cleared in updateState() when the source stops.
bool Track::isActive() const {
	return _active;
}
Track::~Track() {
	// Release OpenAL objects before unregistering from the Instance.
	detachFromDevice();
	_instance->unregisterTrack(this);
}
// Sets up the two timers driving audio-track bookkeeping:
// - _updateTimer polls every track's OpenAL state and, while any
//   track is active, prevents a pending not-used device detach;
// - _detachFromDeviceTimer performs the actual (delayed) detach.
Instance::Instance() {
	// Named to match kDetachDeviceTimeout instead of a bare "100".
	constexpr auto kUpdateStateEach = TimeMs(100);

	_updateTimer.setCallback([this] {
		auto hasActive = false;
		for (auto track : _tracks) {
			track->updateState();
			if (track->isActive()) {
				hasActive = true;
			}
		}
		if (hasActive) {
			// Keep the device attached while something is playing.
			Audio::StopDetachIfNotUsedSafe();
		}
	});
	_updateTimer.callEach(kUpdateStateEach);

	_detachFromDeviceTimer.setCallback([this] {
		_detachFromDeviceForce = false;
		Player::internal::DetachFromDevice();
	});
}
// Factory for tracks bound to this instance; the caller owns the track.
std::unique_ptr<Track> Instance::createTrack() {
	return std::make_unique<Track>(this);
}
Instance::~Instance() {
	// Every created Track must be destroyed before the Instance.
	Expects(_tracks.empty());
}
// Called from the Track constructor.
void Instance::registerTrack(Track *track) {
	_tracks.insert(track);
}
// Called from the Track destructor.
void Instance::unregisterTrack(Track *track) {
	_tracks.erase(track);
}
// Frees the OpenAL objects of every track, e.g. when the audio
// device is being closed.
void Instance::detachTracks() {
	for (auto track : _tracks) {
		track->detachFromDevice();
	}
}
// Restores sources for active tracks, but only when the audio device
// is actually attached.
void Instance::reattachTracks() {
	if (!IsAttachedToDevice()) {
		return;
	}
	for (auto track : _tracks) {
		track->reattachToDevice();
	}
}
bool Instance::hasActiveTracks() const {
for (auto track : _tracks) {
if (track->isActive()) {
return true;
}
}
return false;
}
// Requests an unconditional detach: the force flag prevents
// stopDetachIfNotUsed() from cancelling the pending timer.
void Instance::scheduleDetachFromDevice() {
	_detachFromDeviceForce = true;
	scheduleDetachIfNotUsed();
}
// Starts the delayed detach timer unless one is already pending.
void Instance::scheduleDetachIfNotUsed() {
	if (!_detachFromDeviceTimer.isActive()) {
		_detachFromDeviceTimer.callOnce(kDetachDeviceTimeout);
	}
}
// Cancels a pending not-used detach, unless a forced detach was
// requested through scheduleDetachFromDevice().
void Instance::stopDetachIfNotUsed() {
	if (!_detachFromDeviceForce) {
		_detachFromDeviceTimer.cancel();
	}
}
// The application-wide audio instance, owned by Messenger.
Instance &Current() {
	return Messenger::Instance().audio();
}
} // namespace Audio
} // namespace Media

View File

@ -0,0 +1,115 @@
/*
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org
Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#pragma once
#include "base/timer.h"
namespace Media {
namespace Audio {
class Instance;
// An audio track (e.g. a notification sound) that is decoded into
// memory once and can then be played through OpenAL repeatedly.
class Track {
public:
	Track(gsl::not_null<Instance*> instance);

	// Decode and keep the whole stream in memory.
	void fillFromData(base::byte_vector &&data);
	void fillFromFile(const FileLocation &location);
	void fillFromFile(const QString &filePath);

	void playOnce();
	void playInLoop();

	// True if decoding or attaching to the audio device failed.
	bool failed() const {
		return _failed;
	}

	// Device management, driven by the owning Instance.
	void detachFromDevice();
	void reattachToDevice();
	bool isActive() const;
	void updateState();

	~Track();

private:
	void createSource();

	gsl::not_null<Instance*> _instance;

	bool _failed = false;
	bool _active = false;
	bool _looping = false;
	// NOTE(review): _volume is not used in the visible code — confirm.
	float64 _volume = 1.;

	int64 _samplesCount = 0;
	int32 _sampleRate = 0;
	base::byte_vector _samples; // Decoded PCM data.

	TimeMs _lengthMs = 0;

	int32 _alFormat = 0;
	int64 _alPosition = 0; // Saved AL_SAMPLE_OFFSET for reattach.
	uint32 _alSource = 0;
	uint32 _alBuffer = 0;

};
// Owns all audio tracks and manages their (re)attachment to the
// audio device, including the delayed detach-when-unused logic.
class Instance {
public:
	// Thread: Main.
	Instance();

	std::unique_ptr<Track> createTrack();

	// Notified (with the track pointer) when a one-shot playback ends.
	base::Observable<Track*> &trackFinished() {
		return _trackFinished;
	}

	// Called around audio device close / reopen.
	void detachTracks();
	void reattachTracks();
	bool hasActiveTracks() const;

	void scheduleDetachFromDevice();
	void scheduleDetachIfNotUsed();
	void stopDetachIfNotUsed();

	~Instance();

private:
	friend class Track;
	void registerTrack(Track *track);
	void unregisterTrack(Track *track);

private:
	std::set<Track*> _tracks;
	base::Observable<Track*> _trackFinished;

	base::Timer _updateTimer;

	base::Timer _detachFromDeviceTimer;
	bool _detachFromDeviceForce = false;

};
Instance &Current();
} // namespace Audio
} // namespace Media

View File

@ -32,7 +32,7 @@ VideoSoundData::~VideoSoundData() {
}
}
ChildFFMpegLoader::ChildFFMpegLoader(uint64 videoPlayId, std::unique_ptr<VideoSoundData> &&data) : AudioPlayerLoader(FileLocation(), QByteArray())
ChildFFMpegLoader::ChildFFMpegLoader(uint64 videoPlayId, std::unique_ptr<VideoSoundData> &&data) : AudioPlayerLoader(FileLocation(), QByteArray(), base::byte_vector())
, _videoPlayId(videoPlayId)
, _parentData(std::move(data)) {
_frame = av_frame_alloc();
@ -84,7 +84,7 @@ bool ChildFFMpegLoader::open(qint64 &position) {
if (_sampleSize < 0) {
_swrContext = swr_alloc();
if (!_swrContext) {
LOG(("Audio Error: Unable to swr_alloc for file '%1', data size '%2'").arg(file.name()).arg(data.size()));
LOG(("Audio Error: Unable to swr_alloc for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
return false;
}
int64_t src_ch_layout = layout, dst_ch_layout = AudioToChannelLayout;
@ -100,7 +100,7 @@ bool ChildFFMpegLoader::open(qint64 &position) {
av_opt_set_sample_fmt(_swrContext, "out_sample_fmt", dst_sample_fmt, 0);
if ((res = swr_init(_swrContext)) < 0) {
LOG(("Audio Error: Unable to swr_init for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to swr_init for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
@ -112,7 +112,7 @@ bool ChildFFMpegLoader::open(qint64 &position) {
_maxResampleSamples = av_rescale_rnd(AVBlockSize / _sampleSize, _dstRate, _srcRate, AV_ROUND_UP);
if ((res = av_samples_alloc_array_and_samples(&_dstSamplesData, 0, AudioToChannels, _maxResampleSamples, AudioToFormat, 0)) < 0) {
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return false;
}
}
@ -133,7 +133,7 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(QByteArray &result, in
return ReadResult::EndOfFile;
} else if (res != AVERROR(EAGAIN)) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to avcodec_receive_frame() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to avcodec_receive_frame() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
@ -155,7 +155,7 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(QByteArray &result, in
FFMpeg::freePacket(&packet);
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
// There is a sample voice message where skipping such packet
// results in a crash (read_access to nullptr) in swr_convert().
//if (res == AVERROR_INVALIDDATA) {
@ -177,13 +177,13 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromReadyFrame(QByteArray &
av_freep(&_dstSamplesData[0]);
if ((res = av_samples_alloc(_dstSamplesData, 0, AudioToChannels, _maxResampleSamples, AudioToFormat, 1)) < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to av_samples_alloc for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
}
if ((res = swr_convert(_swrContext, _dstSamplesData, dstSamples, (const uint8_t**)_frame->extended_data, _frame->nb_samples)) < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to swr_convert for file '%1', data size '%2', error %3, %4").arg(file.name()).arg(data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
LOG(("Audio Error: Unable to swr_convert for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
return ReadResult::Error;
}
int32 resultLen = av_samples_get_buffer_size(0, AudioToChannels, res, AudioToFormat, 1);

View File

@ -33,8 +33,8 @@ Instance *SingleInstance = nullptr;
} // namespace
void start() {
InitAudio();
Capture::Init();
Audio::Start();
Capture::Start();
SingleInstance = new Instance();
}
@ -42,8 +42,8 @@ void start() {
void finish() {
delete base::take(SingleInstance);
Capture::DeInit();
DeInitAudio();
Capture::Finish();
Audio::Finish();
}
Instance::Instance() {

View File

@ -36,6 +36,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
#include "mtproto/dc_options.h"
#include "mtproto/mtp_instance.h"
#include "media/player/media_player_instance.h"
#include "media/media_audio_track.h"
#include "window/notifications_manager.h"
#include "window/themes/window_theme.h"
#include "history/history_location_manager.h"
@ -64,7 +65,8 @@ struct Messenger::Private {
};
Messenger::Messenger() : QObject()
, _private(std::make_unique<Private>()) {
, _private(std::make_unique<Private>())
, _audio(std::make_unique<Media::Audio::Instance>()) {
t_assert(SingleInstance == nullptr);
SingleInstance = this;
@ -750,7 +752,9 @@ void Messenger::clearPasscode() {
_passcodedChanged.notify();
}
void Messenger::prepareToDestroy() {
Messenger::~Messenger() {
Expects(SingleInstance == this);
_window.reset();
// Some MTP requests can be cancelled from data clearing.
@ -759,11 +763,6 @@ void Messenger::prepareToDestroy() {
_mtproto.reset();
_mtprotoForKeysDestroy.reset();
}
Messenger::~Messenger() {
t_assert(SingleInstance == this);
SingleInstance = nullptr;
Shortcuts::finish();
@ -783,6 +782,8 @@ Messenger::~Messenger() {
Local::finish();
Global::finish();
ThirdParty::finish();
SingleInstance = nullptr;
}
MainWindow *Messenger::mainWindow() {

View File

@ -41,6 +41,12 @@ namespace Local {
struct StoredAuthSession;
} // namespace Local
namespace Media {
namespace Audio {
class Instance;
} // namespace Audio
} // namespace Media
class Messenger final : public QObject, public RPCSender, private base::Subscriber {
Q_OBJECT
@ -50,7 +56,6 @@ public:
Messenger(const Messenger &other) = delete;
Messenger &operator=(const Messenger &other) = delete;
void prepareToDestroy();
~Messenger();
MainWindow *mainWindow();
@ -63,6 +68,7 @@ public:
return *result;
}
// MTProto components.
MTP::DcOptions *dcOptions() {
return _dcOptions.get();
}
@ -85,6 +91,7 @@ public:
void suggestMainDcId(MTP::DcId mainDcId);
void destroyStaleAuthorizationKeys();
// AuthSession component.
AuthSession *authSession() {
return _authSession.get();
}
@ -94,6 +101,11 @@ public:
return _authSessionChanged;
}
// Media component.
Media::Audio::Instance &audio() {
return *_audio;
}
void setInternalLinkDomain(const QString &domain) const;
QString createInternalLink(const QString &query) const;
QString createInternalLinkFull(const QString &query) const;
@ -179,4 +191,6 @@ private:
base::Observable<void> _authSessionChanged;
base::Observable<void> _passcodedChanged;
std::unique_ptr<Media::Audio::Instance> _audio;
};

View File

@ -0,0 +1,45 @@
/*
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org
Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#include <iconv.h>
#ifdef iconv_open
#undef iconv_open
#endif // iconv_open
#ifdef iconv
#undef iconv
#endif // iconv
#ifdef iconv_close
#undef iconv_close
#endif // iconv_close
// Forwards to the libiconv implementation.
iconv_t iconv_open(const char* tocode, const char* fromcode) {
	return libiconv_open(tocode, fromcode);
}
// Forwards to the libiconv implementation.
size_t iconv(iconv_t cd, char** inbuf, size_t *inbytesleft, char** outbuf, size_t *outbytesleft) {
	return libiconv(cd, inbuf, inbytesleft, outbuf, outbytesleft);
}
// Forwards to the libiconv implementation.
int iconv_close(iconv_t cd) {
	return libiconv_close(cd);
}

View File

@ -153,7 +153,7 @@ ApplicationDelegate *_sharedDelegate = nil;
}
LOG(("Audio Info: -receiveWakeNote: received, scheduling detach from audio device"));
Media::Player::DetachFromDeviceByTimer();
Media::Audio::ScheduleDetachFromDeviceSafe();
}
- (void) setWatchingMediaKeys:(bool)watching {

View File

@ -105,7 +105,7 @@ STDMETHODIMP DeviceListener::OnPropertyValueChanged(LPCWSTR device_id, const PRO
// || key.fmtid == pkey_AudioUnknown2
|| false) {
LOG(("Audio Info: OnPropertyValueChanged(%1, %2) scheduling detach from audio device.").arg(deviceName).arg(keyName));
Media::Player::DetachFromDeviceByTimer();
Media::Audio::ScheduleDetachFromDeviceSafe();
} else {
DEBUG_LOG(("Audio Info: OnPropertyValueChanged(%1, %2) unknown, skipping.").arg(deviceName).arg(keyName));
}
@ -115,7 +115,7 @@ STDMETHODIMP DeviceListener::OnPropertyValueChanged(LPCWSTR device_id, const PRO
STDMETHODIMP DeviceListener::OnDeviceStateChanged(LPCWSTR device_id, DWORD new_state) {
auto deviceName = device_id ? '"' + QString::fromWCharArray(device_id) + '"' : QString("nullptr");
LOG(("Audio Info: OnDeviceStateChanged(%1, %2) scheduling detach from audio device.").arg(deviceName).arg(new_state));
Media::Player::DetachFromDeviceByTimer();
Media::Audio::ScheduleDetachFromDeviceSafe();
return S_OK;
}
@ -127,7 +127,7 @@ STDMETHODIMP DeviceListener::OnDefaultDeviceChanged(EDataFlow flow, ERole role,
}
LOG(("Audio Info: OnDefaultDeviceChanged() scheduling detach from audio device, flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
Media::Player::DetachFromDeviceByTimer();
Media::Audio::ScheduleDetachFromDeviceSafe();
return S_OK;
}

View File

@ -84,6 +84,7 @@
'defines': [
'AL_LIBTYPE_STATIC',
'AL_ALEXT_PROTOTYPES',
'TGVOIP_USE_CXX11_LIB',
'<!@(python -c "for s in \'<(build_defines)\'.split(\',\'): print(s)")',
],

View File

@ -207,6 +207,8 @@
<(src_loc)/media/media_audio_loader.h
<(src_loc)/media/media_audio_loaders.cpp
<(src_loc)/media/media_audio_loaders.h
<(src_loc)/media/media_audio_track.cpp
<(src_loc)/media/media_audio_track.h
<(src_loc)/media/media_child_ffmpeg_loader.cpp
<(src_loc)/media/media_child_ffmpeg_loader.h
<(src_loc)/media/media_clip_ffmpeg.cpp
@ -266,6 +268,7 @@
<(src_loc)/platform/linux/specific_linux.h
<(src_loc)/platform/mac/file_utilities_mac.mm
<(src_loc)/platform/mac/file_utilities_mac.h
<(src_loc)/platform/mac/mac_iconv_helper.c
<(src_loc)/platform/mac/mac_utilities.mm
<(src_loc)/platform/mac/mac_utilities.h
<(src_loc)/platform/mac/main_window_mac.mm