From 6ae68b337db256b0fbdd18896da451f445f137e0 Mon Sep 17 00:00:00 2001 From: John Preston Date: Thu, 19 Jan 2017 11:24:43 +0300 Subject: [PATCH] Audio capture moved to a separate module. --- Telegram/SourceFiles/app.cpp | 6 +- .../history/history_media_types.cpp | 70 +- Telegram/SourceFiles/historywidget.cpp | 19 +- .../inline_bot_layout_internal.cpp | 34 +- Telegram/SourceFiles/mainwidget.cpp | 77 +- Telegram/SourceFiles/media/media_audio.cpp | 1354 +++++------------ Telegram/SourceFiles/media/media_audio.h | 131 +- .../SourceFiles/media/media_audio_capture.cpp | 700 +++++++++ .../SourceFiles/media/media_audio_capture.h | 101 ++ .../SourceFiles/media/media_audio_loaders.cpp | 66 +- .../SourceFiles/media/media_audio_loaders.h | 19 +- .../SourceFiles/media/media_clip_ffmpeg.cpp | 18 +- .../media/player/media_player_cover.cpp | 63 +- .../media/player/media_player_instance.cpp | 46 +- .../media/player/media_player_instance.h | 5 - .../media/player/media_player_list.cpp | 25 +- .../media/player/media_player_widget.cpp | 68 +- Telegram/SourceFiles/mediaview.cpp | 11 +- .../SourceFiles/overview/overview_layout.cpp | 34 +- Telegram/SourceFiles/overviewwidget.cpp | 4 +- Telegram/SourceFiles/settings.cpp | 3 - Telegram/SourceFiles/settings.h | 3 - Telegram/SourceFiles/shortcuts.cpp | 42 +- Telegram/SourceFiles/structs.cpp | 44 +- Telegram/gyp/Telegram.gyp | 2 + 25 files changed, 1516 insertions(+), 1429 deletions(-) create mode 100644 Telegram/SourceFiles/media/media_audio_capture.cpp create mode 100644 Telegram/SourceFiles/media/media_audio_capture.h diff --git a/Telegram/SourceFiles/app.cpp b/Telegram/SourceFiles/app.cpp index dbb4b9426a..a4b6afa911 100644 --- a/Telegram/SourceFiles/app.cpp +++ b/Telegram/SourceFiles/app.cpp @@ -195,9 +195,7 @@ namespace { Global::SetLocalPasscode(false); Global::RefLocalPasscodeChanged().notify(); } - if (audioPlayer()) { - audioPlayer()->stopAndClear(); - } + Media::Player::mixer()->stopAndClear(); if (auto w = wnd()) { 
w->tempDirDelete(Local::ClearManagerAll); w->notifyClearFast(); @@ -2436,7 +2434,7 @@ namespace { void playSound() { if (Global::SoundNotify() && !Platform::Notifications::skipAudio()) { - audioPlayNotify(); + Media::Player::PlayNotify(); } } diff --git a/Telegram/SourceFiles/history/history_media_types.cpp b/Telegram/SourceFiles/history/history_media_types.cpp index e754aebdf0..75dc4c42ac 100644 --- a/Telegram/SourceFiles/history/history_media_types.cpp +++ b/Telegram/SourceFiles/history/history_media_types.cpp @@ -1437,47 +1437,45 @@ bool HistoryDocument::updateStatusText() const { statusSize = _data->loadOffset(); } else if (_data->loaded()) { statusSize = FileStatusSizeLoaded; - if (audioPlayer()) { - if (_data->voice()) { - AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice); - if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - if (auto voice = Get()) { - bool was = voice->_playback; - voice->ensurePlayback(this); - if (!was || playbackState.position != voice->_playback->_position) { - float64 prg = playbackState.duration ? snap(float64(playbackState.position) / playbackState.duration, 0., 1.) 
: 0.; - if (voice->_playback->_position < playbackState.position) { - voice->_playback->a_progress.start(prg); - } else { - voice->_playback->a_progress = anim::value(0., prg); - } - voice->_playback->_position = playbackState.position; - voice->_playback->_a_progress.start(); + if (_data->voice()) { + AudioMsgId playing; + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); + if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + if (auto voice = Get()) { + bool was = voice->_playback; + voice->ensurePlayback(this); + if (!was || playbackState.position != voice->_playback->_position) { + float64 prg = playbackState.duration ? snap(float64(playbackState.position) / playbackState.duration, 0., 1.) : 0.; + if (voice->_playback->_position < playbackState.position) { + voice->_playback->a_progress.start(prg); + } else { + voice->_playback->a_progress = anim::value(0., prg); } + voice->_playback->_position = playbackState.position; + voice->_playback->_a_progress.start(); } + } - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - } else { - if (auto voice = Get()) { - voice->checkPlaybackFinished(); - } - } - } else if (_data->song()) { - AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? 
playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - } else { - } - if (!showPause && (playing == AudioMsgId(_data, _parent->fullId()))) { - showPause = (Media::Player::exists() && Media::Player::instance()->isSeeking()); + statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); + realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); + showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + } else { + if (auto voice = Get()) { + voice->checkPlaybackFinished(); } } + } else if (_data->song()) { + AudioMsgId playing; + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); + if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); + realDuration = playbackState.duration / (playbackState.frequency ? 
playbackState.frequency : AudioVoiceMsgFrequency); + showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + } else { + } + if (!showPause && (playing == AudioMsgId(_data, _parent->fullId()))) { + showPause = Media::Player::instance()->isSeeking(); + } } } else { statusSize = FileStatusSizeReady; diff --git a/Telegram/SourceFiles/historywidget.cpp b/Telegram/SourceFiles/historywidget.cpp index 52c9addd84..e88596faf0 100644 --- a/Telegram/SourceFiles/historywidget.cpp +++ b/Telegram/SourceFiles/historywidget.cpp @@ -52,6 +52,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include "mainwindow.h" #include "fileuploader.h" #include "media/media_audio.h" +#include "media/media_audio_capture.h" #include "localstorage.h" #include "apiwrap.h" #include "window/top_bar_widget.h" @@ -3115,11 +3116,9 @@ HistoryWidget::HistoryWidget(QWidget *parent) : TWidget(parent) connect(_emojiPan, SIGNAL(updateStickers()), this, SLOT(updateStickers())); connect(&_sendActionStopTimer, SIGNAL(timeout()), this, SLOT(onCancelSendAction())); connect(&_previewTimer, SIGNAL(timeout()), this, SLOT(onPreviewTimeout())); - if (audioCapture()) { - connect(audioCapture(), SIGNAL(error()), this, SLOT(onRecordError())); - connect(audioCapture(), SIGNAL(updated(quint16,qint32)), this, SLOT(onRecordUpdate(quint16,qint32))); - connect(audioCapture(), SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SLOT(onRecordDone(QByteArray,VoiceWaveform,qint32))); - } + connect(Media::Capture::instance(), SIGNAL(error()), this, SLOT(onRecordError())); + connect(Media::Capture::instance(), SIGNAL(updated(quint16,qint32)), this, SLOT(onRecordUpdate(quint16,qint32))); + connect(Media::Capture::instance(), SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SLOT(onRecordDone(QByteArray,VoiceWaveform,qint32))); _attachToggle->setClickedCallback(App::LambdaDelayed(st::historyAttach.ripple.hideDuration, 
this, [this] { chooseAttach(); @@ -5649,10 +5648,10 @@ void HistoryWidget::leaveToChildEvent(QEvent *e, QWidget *child) { // e -- from } void HistoryWidget::recordStartCallback() { - if (!cHasAudioCapture()) { + if (!Media::Capture::instance()->available()) { return; } - emit audioCapture()->start(); + emit Media::Capture::instance()->start(); _recording = _inField = true; updateControlsVisibility(); @@ -5680,13 +5679,13 @@ void HistoryWidget::mouseReleaseEvent(QMouseEvent *e) { _attachDrag = DragStateNone; updateDragAreas(); } - if (_recording && cHasAudioCapture()) { + if (_recording) { stopRecording(_peer && _inField); } } void HistoryWidget::stopRecording(bool send) { - emit audioCapture()->stop(send); + emit Media::Capture::instance()->stop(send); a_recordingLevel = anim::value(); _a_recording.stop(); @@ -6045,7 +6044,7 @@ bool HistoryWidget::isMuteUnmute() const { } bool HistoryWidget::showRecordButton() const { - return cHasAudioCapture() && !_field->hasSendText() && !readyToForward() && !_editMsgId; + return Media::Capture::instance()->available() && !_field->hasSendText() && !readyToForward() && !_editMsgId; } bool HistoryWidget::showInlineBotCancel() const { diff --git a/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp b/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp index 0636676d8a..5934bd6166 100644 --- a/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp +++ b/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp @@ -832,28 +832,24 @@ bool File::updateStatusText() const { } else if (document->loaded()) { if (document->voice()) { statusSize = FileStatusSizeLoaded; - if (audioPlayer()) { - AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice); - if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / 
(playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - } + AudioMsgId playing; + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); + if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); + realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); + showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); } } else if (document->song()) { statusSize = FileStatusSizeLoaded; - if (audioPlayer()) { - AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? 
playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - } - if (!showPause && (playing == AudioMsgId(document, FullMsgId())) && Media::Player::exists() && Media::Player::instance()->isSeeking()) { - showPause = true; - } + AudioMsgId playing; + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); + if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); + realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); + showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + } + if (!showPause && (playing == AudioMsgId(document, FullMsgId())) && Media::Player::instance()->isSeeking()) { + showPause = true; } } else { statusSize = FileStatusSizeLoaded; diff --git a/Telegram/SourceFiles/mainwidget.cpp b/Telegram/SourceFiles/mainwidget.cpp index 1ee9c53698..743a732fbe 100644 --- a/Telegram/SourceFiles/mainwidget.cpp +++ b/Telegram/SourceFiles/mainwidget.cpp @@ -100,13 +100,11 @@ MainWidget::MainWidget(QWidget *parent) : TWidget(parent) connect(_topBar, SIGNAL(clicked()), this, SLOT(onTopBarClick())); connect(_history, SIGNAL(historyShown(History*,MsgId)), this, SLOT(onHistoryShown(History*,MsgId))); connect(&updateNotifySettingTimer, SIGNAL(timeout()), this, SLOT(onUpdateNotifySettings())); - if (auto player = audioPlayer()) { - subscribe(player, [this](const AudioMsgId &audioId) { - if (audioId.type() != AudioMsgId::Type::Video) { - handleAudioUpdate(audioId); - } - }); - } + subscribe(Media::Player::Updated(), 
[this](const AudioMsgId &audioId) { + if (audioId.type() != AudioMsgId::Type::Video) { + handleAudioUpdate(audioId); + } + }); subscribe(Global::RefDialogsListFocused(), [this](bool) { updateDialogsWidthAnimated(); @@ -133,30 +131,28 @@ MainWidget::MainWidget(QWidget *parent) : TWidget(parent) }); connect(&_cacheBackgroundTimer, SIGNAL(timeout()), this, SLOT(onCacheBackground())); - if (Media::Player::exists()) { - _playerPanel->setPinCallback([this] { switchToFixedPlayer(); }); - _playerPanel->setCloseCallback([this] { closeBothPlayers(); }); - subscribe(Media::Player::instance()->titleButtonOver(), [this](bool over) { - if (over) { - _playerPanel->showFromOther(); - } else { - _playerPanel->hideFromOther(); + _playerPanel->setPinCallback([this] { switchToFixedPlayer(); }); + _playerPanel->setCloseCallback([this] { closeBothPlayers(); }); + subscribe(Media::Player::instance()->titleButtonOver(), [this](bool over) { + if (over) { + _playerPanel->showFromOther(); + } else { + _playerPanel->hideFromOther(); + } + }); + subscribe(Media::Player::instance()->playerWidgetOver(), [this](bool over) { + if (over) { + if (_playerPlaylist->isHidden()) { + auto position = mapFromGlobal(QCursor::pos()).x(); + auto bestPosition = _playerPlaylist->bestPositionFor(position); + if (rtl()) bestPosition = position + 2 * (position - bestPosition) - _playerPlaylist->width(); + updateMediaPlaylistPosition(bestPosition); } - }); - subscribe(Media::Player::instance()->playerWidgetOver(), [this](bool over) { - if (over) { - if (_playerPlaylist->isHidden()) { - auto position = mapFromGlobal(QCursor::pos()).x(); - auto bestPosition = _playerPlaylist->bestPositionFor(position); - if (rtl()) bestPosition = position + 2 * (position - bestPosition) - _playerPlaylist->width(); - updateMediaPlaylistPosition(bestPosition); - } - _playerPlaylist->showFromOther(); - } else { - _playerPlaylist->hideFromOther(); - } - }); - } + _playerPlaylist->showFromOther(); + } else { + 
_playerPlaylist->hideFromOther(); + } + }); subscribe(Adaptive::Changed(), [this]() { handleAdaptiveLayoutUpdate(); }); @@ -1568,10 +1564,10 @@ void MainWidget::ui_autoplayMediaInlineAsync(qint32 channelId, qint32 msgId) { void MainWidget::handleAudioUpdate(const AudioMsgId &audioId) { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, audioId.type()); + auto playbackState = Media::Player::mixer()->currentState(&playing, audioId.type()); if (playing == audioId && playbackState.state == AudioPlayerStoppedAtStart) { playbackState.state = AudioPlayerStopped; - audioPlayer()->clearStoppedAtStart(audioId); + Media::Player::mixer()->clearStoppedAtStart(audioId); auto document = audioId.audio(); auto filepath = document->filepath(DocumentData::FilePathResolveSaveFromData); @@ -1584,7 +1580,7 @@ void MainWidget::handleAudioUpdate(const AudioMsgId &audioId) { if (playing == audioId && audioId.type() == AudioMsgId::Type::Song) { if (!(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - if (!_playerUsingPanel && !_player && Media::Player::exists()) { + if (!_playerUsingPanel && !_player) { createPlayer(); } } else if (_player && _player->isHidden() && !_playerUsingPanel) { @@ -1642,15 +1638,10 @@ void MainWidget::closeBothPlayers() { } _playerVolume.destroyDelayed(); - if (Media::Player::exists()) { - Media::Player::instance()->usePanelPlayer().notify(false, true); - } + Media::Player::instance()->usePanelPlayer().notify(false, true); _playerPanel->hideIgnoringEnterEvents(); _playerPlaylist->hideIgnoringEnterEvents(); - - if (Media::Player::exists()) { - Media::Player::instance()->stop(); - } + Media::Player::instance()->stop(); Shortcuts::disableMediaShortcuts(); } @@ -1685,7 +1676,7 @@ void MainWidget::playerHeightUpdated() { } if (!_playerHeight && _player->isHidden()) { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = 
Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing && (playbackState.state & AudioPlayerStoppedMask)) { _playerVolume.destroyDelayed(); _player.destroyDelayed(); @@ -1714,9 +1705,7 @@ void MainWidget::documentLoadProgress(DocumentData *document) { App::wnd()->documentUpdated(document); if (!document->loaded() && document->song()) { - if (Media::Player::exists()) { - Media::Player::instance()->documentLoadProgress(document); - } + Media::Player::instance()->documentLoadProgress(document); } } diff --git a/Telegram/SourceFiles/media/media_audio.cpp b/Telegram/SourceFiles/media/media_audio.cpp index 883570150b..98862cba91 100644 --- a/Telegram/SourceFiles/media/media_audio.cpp +++ b/Telegram/SourceFiles/media/media_audio.cpp @@ -33,6 +33,9 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include +Q_DECLARE_METATYPE(AudioMsgId); +Q_DECLARE_METATYPE(VoiceWaveform); + extern "C" { #ifdef Q_OS_MAC #include @@ -55,131 +58,82 @@ int iconv_close(iconv_t cd) { } // extern "C" namespace { - ALCdevice *audioDevice = 0; - ALCcontext *audioContext = 0; - ALuint notifySource = 0; - ALuint notifyBuffer = 0; - TimeMs notifyLengthMs = 0; +QMutex AudioMutex; +ALCdevice *AudioDevice = nullptr; +ALCcontext *AudioContext = nullptr; - QMutex playerMutex; - AudioPlayer *player = 0; +float64 suppressAllGain = 1., suppressSongGain = 1.; - float64 suppressAllGain = 1., suppressSongGain = 1.; +} // namespace - AudioCapture *capture = 0; -} +namespace Media { +namespace Player { +namespace { -bool _checkALCError() { - ALenum errCode; - if ((errCode = alcGetError(audioDevice)) != ALC_NO_ERROR) { - LOG(("Audio Error: (alc) %1, %2").arg(errCode).arg((const char *)alcGetString(audioDevice, errCode))); - return false; - } - return true; -} +struct NotifySound { + QByteArray data; + TimeMs lengthMs = 0; + int sampleRate = 0; -bool _checkCaptureError(ALCdevice *device) { - ALenum errCode; - if ((errCode = alcGetError(device)) != ALC_NO_ERROR) { - 
LOG(("Audio Error: (capture) %1, %2").arg(errCode).arg((const char *)alcGetString(audioDevice, errCode))); - return false; - } - return true; -} + ALenum alFormat = 0; -bool _checkALError() { - ALenum errCode; - if ((errCode = alGetError()) != AL_NO_ERROR) { - LOG(("Audio Error: (al) %1, %2").arg(errCode).arg((const char *)alGetString(errCode))); - return false; - } - return true; -} + ALuint source = 0; + ALuint buffer = 0; +}; +NotifySound DefaultNotify; -Q_DECLARE_METATYPE(AudioMsgId); -Q_DECLARE_METATYPE(VoiceWaveform); -void audioInit() { - if (!capture) { - capture = new AudioCapture(); - cSetHasAudioCapture(capture->check()); - } +void PrepareNotifySound() { + auto content = ([] { + QFile soundFile(":/gui/art/newmsg.wav"); + soundFile.open(QIODevice::ReadOnly); + return soundFile.readAll(); + })(); + auto data = content.constData(); + auto size = content.size(); + t_assert(size >= 44); - if (audioDevice) return; + t_assert(*((const uint32*)(data + 0)) == 0x46464952); // ChunkID - "RIFF" + t_assert(*((const uint32*)(data + 4)) == uint32(size - 8)); // ChunkSize + t_assert(*((const uint32*)(data + 8)) == 0x45564157); // Format - "WAVE" + t_assert(*((const uint32*)(data + 12)) == 0x20746d66); // Subchunk1ID - "fmt " + auto subchunk1Size = *((const uint32*)(data + 16)); + auto extra = subchunk1Size - 16; + t_assert(subchunk1Size >= 16 && (!extra || extra >= 2)); + t_assert(*((const uint16*)(data + 20)) == 1); // AudioFormat - PCM (1) - audioDevice = alcOpenDevice(0); - if (!audioDevice) { - LOG(("Audio Error: default sound device not present.")); - return; - } + auto numChannels = *((const uint16*)(data + 22)); + t_assert(numChannels == 1 || numChannels == 2); - ALCint attributes[] = { ALC_STEREO_SOURCES, 8, 0 }; - audioContext = alcCreateContext(audioDevice, attributes); - alcMakeContextCurrent(audioContext); - if (!_checkALCError()) return audioFinish(); + auto sampleRate = *((const uint32*)(data + 24)); + auto byteRate = *((const uint32*)(data + 28)); - 
ALfloat v[] = { 0.f, 0.f, -1.f, 0.f, 1.f, 0.f }; - alListener3f(AL_POSITION, 0.f, 0.f, 0.f); - alListener3f(AL_VELOCITY, 0.f, 0.f, 0.f); - alListenerfv(AL_ORIENTATION, v); + auto blockAlign = *((const uint16*)(data + 32)); + auto bitsPerSample = *((const uint16*)(data + 34)); + t_assert(!(bitsPerSample % 8)); - alDistanceModel(AL_NONE); + auto bytesPerSample = bitsPerSample / 8; + t_assert(bytesPerSample == 1 || bytesPerSample == 2); - alGenSources(1, ¬ifySource); - alSourcef(notifySource, AL_PITCH, 1.f); - alSourcef(notifySource, AL_GAIN, 1.f); - alSource3f(notifySource, AL_POSITION, 0, 0, 0); - alSource3f(notifySource, AL_VELOCITY, 0, 0, 0); - alSourcei(notifySource, AL_LOOPING, 0); - - alGenBuffers(1, ¬ifyBuffer); - if (!_checkALError()) return audioFinish(); - - QFile notify(":/gui/art/newmsg.wav"); - if (!notify.open(QIODevice::ReadOnly)) return audioFinish(); - - QByteArray blob = notify.readAll(); - const char *data = blob.constData(); - if (blob.size() < 44) return audioFinish(); - - if (*((const uint32*)(data + 0)) != 0x46464952) return audioFinish(); // ChunkID - "RIFF" - if (*((const uint32*)(data + 4)) != uint32(blob.size() - 8)) return audioFinish(); // ChunkSize - if (*((const uint32*)(data + 8)) != 0x45564157) return audioFinish(); // Format - "WAVE" - if (*((const uint32*)(data + 12)) != 0x20746d66) return audioFinish(); // Subchunk1ID - "fmt " - uint32 subchunk1Size = *((const uint32*)(data + 16)), extra = subchunk1Size - 16; - if (subchunk1Size < 16 || (extra && extra < 2)) return audioFinish(); - if (*((const uint16*)(data + 20)) != 1) return audioFinish(); // AudioFormat - PCM (1) - - uint16 numChannels = *((const uint16*)(data + 22)); - if (numChannels != 1 && numChannels != 2) return audioFinish(); - - uint32 sampleRate = *((const uint32*)(data + 24)); - uint32 byteRate = *((const uint32*)(data + 28)); - - uint16 blockAlign = *((const uint16*)(data + 32)); - uint16 bitsPerSample = *((const uint16*)(data + 34)); - if (bitsPerSample % 8) return 
audioFinish(); - uint16 bytesPerSample = bitsPerSample / 8; - if (bytesPerSample != 1 && bytesPerSample != 2) return audioFinish(); - - if (blockAlign != numChannels * bytesPerSample) return audioFinish(); - if (byteRate != sampleRate * blockAlign) return audioFinish(); + t_assert(blockAlign == numChannels * bytesPerSample); + t_assert(byteRate == sampleRate * blockAlign); if (extra) { - uint16 extraSize = *((const uint16*)(data + 36)); - if (uint32(extraSize + 2) != extra) return audioFinish(); - if (uint32(blob.size()) < 44 + extra) return audioFinish(); + auto extraSize = *((const uint16*)(data + 36)); + t_assert(uint32(extraSize + 2) == extra); + t_assert(uint32(size) >= 44 + extra); } - if (*((const uint32*)(data + extra + 36)) != 0x61746164) return audioFinish(); // Subchunk2ID - "data" - uint32 subchunk2Size = *((const uint32*)(data + extra + 40)); - if (subchunk2Size % (numChannels * bytesPerSample)) return audioFinish(); - uint32 numSamples = subchunk2Size / (numChannels * bytesPerSample); + t_assert(*((const uint32*)(data + extra + 36)) == 0x61746164); // Subchunk2ID - "data" + auto subchunk2Size = *((const uint32*)(data + extra + 40)); - if (uint32(blob.size()) < 44 + extra + subchunk2Size) return audioFinish(); + t_assert(!(subchunk2Size % (numChannels * bytesPerSample))); + auto numSamples = subchunk2Size / (numChannels * bytesPerSample); + + t_assert(uint32(size) >= 44 + extra + subchunk2Size); data += 44 + extra; - ALenum format = 0; + auto format = ALenum(0); switch (bytesPerSample) { case 1: switch (numChannels) { @@ -195,73 +149,218 @@ void audioInit() { } break; } - if (!format) return audioFinish(); + t_assert(format != 0); - int32 addBytes = (sampleRate * 15 / 100) * bytesPerSample * numChannels; // add 150ms of silence - QByteArray fullData(addBytes + subchunk2Size, (bytesPerSample == 1) ? 
128 : 0); - memcpy(fullData.data() + addBytes, data, subchunk2Size); - alBufferData(notifyBuffer, format, fullData.constData(), fullData.size(), sampleRate); - alSourcei(notifySource, AL_BUFFER, notifyBuffer); + DefaultNotify.alFormat = format; + DefaultNotify.sampleRate = sampleRate; + auto addBytes = (sampleRate * 15 / 100) * bytesPerSample * numChannels; // add 150ms of silence + DefaultNotify.data = QByteArray(addBytes + subchunk2Size, (bytesPerSample == 1) ? 128 : 0); + memcpy(DefaultNotify.data.data() + addBytes, data, subchunk2Size); + DefaultNotify.lengthMs = (numSamples * 1000LL / sampleRate); +} - notifyLengthMs = (numSamples * 1000ULL / sampleRate); +base::Observable UpdatedObservable; - if (!_checkALError()) return audioFinish(); +Mixer *MixerInstance = nullptr; + +bool ContextErrorHappened() { + ALenum errCode; + if ((errCode = alcGetError(AudioDevice)) != ALC_NO_ERROR) { + LOG(("Audio Context Error: %1, %2").arg(errCode).arg((const char *)alcGetString(AudioDevice, errCode))); + return true; + } + return false; +} + +bool PlaybackErrorHappened() { + ALenum errCode; + if ((errCode = alGetError()) != AL_NO_ERROR) { + LOG(("Audio Playback Error: %1, %2").arg(errCode).arg((const char *)alGetString(errCode))); + return true; + } + return false; +} + +void EnumeratePlaybackDevices() { + auto deviceNames = QStringList(); + auto devices = alcGetString(nullptr, ALC_DEVICE_SPECIFIER); + t_assert(devices != nullptr); + while (*devices != 0) { + auto deviceName8Bit = QByteArray(devices); + auto deviceName = QString::fromLocal8Bit(deviceName8Bit); + deviceNames.append(deviceName); + devices += deviceName8Bit.size() + 1; + } + LOG(("Audio Playback Devices: %1").arg(deviceNames.join(';'))); + + if (auto device = alcGetString(nullptr, ALC_DEFAULT_DEVICE_SPECIFIER)) { + LOG(("Audio Playback Default Device: %1").arg(QString::fromLocal8Bit(device))); + } else { + LOG(("Audio Playback Default Device: (null)")); + } +} + +void EnumerateCaptureDevices() { + auto deviceNames 
= QStringList(); + auto devices = alcGetString(nullptr, ALC_CAPTURE_DEVICE_SPECIFIER); + t_assert(devices != nullptr); + while (*devices != 0) { + auto deviceName8Bit = QByteArray(devices); + auto deviceName = QString::fromLocal8Bit(deviceName8Bit); + deviceNames.append(deviceName); + devices += deviceName8Bit.size() + 1; + } + LOG(("Audio Capture Devices: %1").arg(deviceNames.join(';'))); + + if (auto device = alcGetString(nullptr, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) { + LOG(("Audio Capture Default Device: %1").arg(QString::fromLocal8Bit(device))); + } else { + LOG(("Audio Capture Default Device: (null)")); + } +} + +ALuint CreateSource() { + auto source = ALuint(0); + alGenSources(1, &source); + alSourcef(source, AL_PITCH, 1.f); + alSourcef(source, AL_GAIN, 1.f); + alSource3f(source, AL_POSITION, 0, 0, 0); + alSource3f(source, AL_VELOCITY, 0, 0, 0); + alSourcei(source, AL_LOOPING, 0); + return source; +} + +ALuint CreateBuffer() { + auto buffer = ALuint(0); + alGenBuffers(1, &buffer); + return buffer; +} + +void CreateDefaultNotify() { + if (alIsSource(DefaultNotify.source)) { + return; + } + + DefaultNotify.source = CreateSource(); + DefaultNotify.buffer = CreateBuffer(); + + alBufferData(DefaultNotify.buffer, DefaultNotify.alFormat, DefaultNotify.data.constData(), DefaultNotify.data.size(), DefaultNotify.sampleRate); + alSourcei(DefaultNotify.source, AL_BUFFER, DefaultNotify.buffer); +} + +// can be called at any moment when audio error +void CloseAudioPlaybackDevice() { + if (!AudioDevice) return; + + delete base::take(MixerInstance); + + if (alIsSource(DefaultNotify.source)) { + alSourceStop(DefaultNotify.source); + } + if (alIsBuffer(DefaultNotify.buffer)) { + alDeleteBuffers(1, &DefaultNotify.buffer); + DefaultNotify.buffer = 0; + } + if (alIsSource(DefaultNotify.source)) { + alDeleteSources(1, &DefaultNotify.source); + DefaultNotify.source = 0; + } + + if (AudioContext) { + alcMakeContextCurrent(nullptr); + alcDestroyContext(AudioContext); + 
AudioContext = nullptr; + } + + if (AudioDevice) { + alcCloseDevice(AudioDevice); + AudioDevice = nullptr; + } +} + +} // namespace + +void InitAudio() { + t_assert(AudioDevice == nullptr); qRegisterMetaType(); qRegisterMetaType(); - player = new AudioPlayer(); - alcDevicePauseSOFT(audioDevice); + PrepareNotifySound(); - cSetHasAudioPlayer(true); + EnumeratePlaybackDevices(); + EnumerateCaptureDevices(); } -void audioPlayNotify() { - if (!audioPlayer()) return; - - audioPlayer()->resumeDevice(); - alSourcePlay(notifySource); - emit audioPlayer()->suppressAll(); - emit audioPlayer()->faderOnTimer(); +void DeInitAudio() { + CloseAudioPlaybackDevice(); } -// can be called at any moment when audio error -void audioFinish() { - if (player) { - delete player; - player = nullptr; - } - if (capture) { - delete capture; - capture = nullptr; - } - - alSourceStop(notifySource); - if (alIsBuffer(notifyBuffer)) { - alDeleteBuffers(1, ¬ifyBuffer); - notifyBuffer = 0; - } - if (alIsSource(notifySource)) { - alDeleteSources(1, ¬ifySource); - notifySource = 0; - } - - if (audioContext) { - alcMakeContextCurrent(nullptr); - alcDestroyContext(audioContext); - audioContext = nullptr; - } - - if (audioDevice) { - alcCloseDevice(audioDevice); - audioDevice = nullptr; - } - - cSetHasAudioCapture(false); - cSetHasAudioPlayer(false); +base::Observable &Updated() { + return UpdatedObservable; } -void AudioPlayer::AudioMsg::clear() { +bool CreateAudioPlaybackDevice() { + if (AudioDevice) return true; + + AudioDevice = alcOpenDevice(nullptr); + if (!AudioDevice) { + LOG(("Audio Error: Could not create default playback device, enumerating..")); + EnumeratePlaybackDevices(); + return false; + } + + ALCint attributes[] = { ALC_STEREO_SOURCES, 8, 0 }; + AudioContext = alcCreateContext(AudioDevice, attributes); + alcMakeContextCurrent(AudioContext); + if (ContextErrorHappened()) { + CloseAudioPlaybackDevice(); + return false; + } + + ALfloat v[] = { 0.f, 0.f, -1.f, 0.f, 1.f, 0.f }; + 
alListener3f(AL_POSITION, 0.f, 0.f, 0.f); + alListener3f(AL_VELOCITY, 0.f, 0.f, 0.f); + alListenerfv(AL_ORIENTATION, v); + + alDistanceModel(AL_NONE); + + MixerInstance = new Mixer(); + + return true; +} + +void PlayNotify() { + if (!mixer()) return; + if (!CreateAudioPlaybackDevice()) return; + + CreateDefaultNotify(); + alSourcePlay(DefaultNotify.source); + if (PlaybackErrorHappened()) { + CloseAudioPlaybackDevice(); + return; + } + + emit mixer()->suppressAll(); + emit mixer()->faderOnTimer(); +} + +bool NotifyIsPlaying() { + if (alIsSource(DefaultNotify.source)) { + ALint state = AL_INITIAL; + alGetSourcei(DefaultNotify.source, AL_SOURCE_STATE, &state); + if (!PlaybackErrorHappened() && state == AL_PLAYING) { + return true; + } + } + return false; +} + +Mixer *mixer() { + return MixerInstance; +} + +void Mixer::AudioMsg::clear() { audio = AudioMsgId(); file = FileLocation(); data = QByteArray(); @@ -286,9 +385,9 @@ void AudioPlayer::AudioMsg::clear() { videoPlayId = 0; } -AudioPlayer::AudioPlayer() -: _fader(new AudioPlayerFader(&_faderThread)) -, _loader(new AudioPlayerLoaders(&_loaderThread)) { +Mixer::Mixer() +: _fader(new Fader(&_faderThread)) +, _loader(new Loaders(&_loaderThread)) { connect(this, SIGNAL(faderOnTimer()), _fader, SLOT(onTimer())); connect(this, SIGNAL(suppressSong()), _fader, SLOT(onSuppressSong())); connect(this, SIGNAL(unsuppressSong()), _fader, SLOT(onUnsuppressSong())); @@ -299,7 +398,7 @@ AudioPlayer::AudioPlayer() subscribe(Global::RefVideoVolumeChanged(), [this] { QMetaObject::invokeMethod(_fader, "onVideoVolumeChanged"); }); - connect(this, SIGNAL(loaderOnStart(const AudioMsgId&,qint64)), _loader, SLOT(onStart(const AudioMsgId&,qint64))); + connect(this, SIGNAL(loaderOnStart(const AudioMsgId&, qint64)), _loader, SLOT(onStart(const AudioMsgId&, qint64))); connect(this, SIGNAL(loaderOnCancel(const AudioMsgId&)), _loader, SLOT(onCancel(const AudioMsgId&))); connect(_loader, SIGNAL(needToCheck()), _fader, SLOT(onTimer())); 
connect(_loader, SIGNAL(error(const AudioMsgId&)), this, SLOT(onError(const AudioMsgId&))); @@ -314,10 +413,10 @@ AudioPlayer::AudioPlayer() _faderThread.start(); } -AudioPlayer::~AudioPlayer() { +Mixer::~Mixer() { { - QMutexLocker lock(&playerMutex); - player = nullptr; + QMutexLocker lock(&AudioMutex); + MixerInstance = nullptr; } auto clearAudioMsg = [](AudioMsg *msg) { @@ -346,28 +445,28 @@ AudioPlayer::~AudioPlayer() { _loaderThread.wait(); } -void AudioPlayer::onUpdated(const AudioMsgId &audio) { +void Mixer::onUpdated(const AudioMsgId &audio) { if (audio.type() == AudioMsgId::Type::Video) { videoSoundProgress(audio); } - notify(audio); + Media::Player::Updated().notify(audio); } -void AudioPlayer::onError(const AudioMsgId &audio) { +void Mixer::onError(const AudioMsgId &audio) { emit stoppedOnError(audio); if (audio.type() == AudioMsgId::Type::Voice) { emit unsuppressSong(); } } -void AudioPlayer::onStopped(const AudioMsgId &audio) { +void Mixer::onStopped(const AudioMsgId &audio) { emit updated(audio); if (audio.type() == AudioMsgId::Type::Voice) { emit unsuppressSong(); } } -AudioPlayer::AudioMsg *AudioPlayer::dataForType(AudioMsgId::Type type, int index) { +Mixer::AudioMsg *Mixer::dataForType(AudioMsgId::Type type, int index) { if (index < 0) { if (auto indexPtr = currentIndex(type)) { index = *indexPtr; @@ -383,11 +482,11 @@ AudioPlayer::AudioMsg *AudioPlayer::dataForType(AudioMsgId::Type type, int index return nullptr; } -const AudioPlayer::AudioMsg *AudioPlayer::dataForType(AudioMsgId::Type type, int index) const { - return const_cast(this)->dataForType(type, index); +const Mixer::AudioMsg *Mixer::dataForType(AudioMsgId::Type type, int index) const { + return const_cast(this)->dataForType(type, index); } -int *AudioPlayer::currentIndex(AudioMsgId::Type type) { +int *Mixer::currentIndex(AudioMsgId::Type type) { switch (type) { case AudioMsgId::Type::Voice: return &_audioCurrent; case AudioMsgId::Type::Song: return &_songCurrent; @@ -396,11 +495,11 @@ 
int *AudioPlayer::currentIndex(AudioMsgId::Type type) { return nullptr; } -const int *AudioPlayer::currentIndex(AudioMsgId::Type type) const { - return const_cast(this)->currentIndex(type); +const int *Mixer::currentIndex(AudioMsgId::Type type) const { + return const_cast(this)->currentIndex(type); } -bool AudioPlayer::updateCurrentStarted(AudioMsgId::Type type, int32 pos) { +bool Mixer::updateCurrentStarted(AudioMsgId::Type type, int32 pos) { auto data = dataForType(type); if (!data) return false; @@ -410,7 +509,7 @@ bool AudioPlayer::updateCurrentStarted(AudioMsgId::Type type, int32 pos) { } else { pos = 0; } - if (!_checkALError()) { + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(data, AudioPlayerStoppedAtError); onError(data->audio); return false; @@ -420,7 +519,7 @@ bool AudioPlayer::updateCurrentStarted(AudioMsgId::Type type, int32 pos) { return true; } -bool AudioPlayer::fadedStop(AudioMsgId::Type type, bool *fadedStart) { +bool Mixer::fadedStop(AudioMsgId::Type type, bool *fadedStart) { auto current = dataForType(type); if (!current) return false; @@ -428,28 +527,30 @@ bool AudioPlayer::fadedStop(AudioMsgId::Type type, bool *fadedStart) { case AudioPlayerStarting: case AudioPlayerResuming: case AudioPlayerPlaying: - current->playbackState.state = AudioPlayerFinishing; - updateCurrentStarted(type); - if (fadedStart) *fadedStart = true; - break; + current->playbackState.state = AudioPlayerFinishing; + updateCurrentStarted(type); + if (fadedStart) *fadedStart = true; + break; case AudioPlayerPausing: - current->playbackState.state = AudioPlayerFinishing; - if (fadedStart) *fadedStart = true; - break; + current->playbackState.state = AudioPlayerFinishing; + if (fadedStart) *fadedStart = true; + break; case AudioPlayerPaused: case AudioPlayerPausedAtEnd: - setStoppedState(current); - return true; + setStoppedState(current); + return true; } return false; } -void AudioPlayer::play(const AudioMsgId &audio, int64 position) { +void Mixer::play(const 
AudioMsgId &audio, int64 position) { + if (!Media::Player::CreateAudioPlaybackDevice()) return; + auto type = audio.type(); AudioMsgId stopped; auto notLoadedYet = false; { - QMutexLocker lock(&playerMutex); + QMutexLocker lock(&AudioMutex); bool fadedStart = false; auto current = dataForType(type); @@ -509,10 +610,10 @@ void AudioPlayer::play(const AudioMsgId &audio, int64 position) { } } -void AudioPlayer::initFromVideo(uint64 videoPlayId, std_::unique_ptr &&data, int64 position) { +void Mixer::initFromVideo(uint64 videoPlayId, std_::unique_ptr &&data, int64 position) { AudioMsgId stopped; { - QMutexLocker lock(&playerMutex); + QMutexLocker lock(&AudioMutex); // Pause current song. auto currentSong = dataForType(AudioMsgId::Type::Song); @@ -522,8 +623,8 @@ void AudioPlayer::initFromVideo(uint64 videoPlayId, std_::unique_ptrplaybackState.state = AudioPlayerPausing; - updateCurrentStarted(AudioMsgId::Type::Song); + currentSong->playbackState.state = AudioPlayerPausing; + updateCurrentStarted(AudioMsgId::Type::Song); break; case AudioPlayerFinishing: currentSong->playbackState.state = AudioPlayerPausing; break; } @@ -557,10 +658,10 @@ void AudioPlayer::initFromVideo(uint64 videoPlayId, std_::unique_ptrresumeDevice(); - alSourcef(data->source, AL_GAIN, suppressGain); if (!checkCurrentALError(type)) return; @@ -665,11 +764,11 @@ void AudioPlayer::resumeFromVideo(uint64 videoPlayId) { if (current) emit updated(current); } -void AudioPlayer::feedFromVideo(VideoSoundPart &&part) { +void Mixer::feedFromVideo(VideoSoundPart &&part) { _loader->feedFromVideo(std_::move(part)); } -TimeMs AudioPlayer::getVideoCorrectedTime(uint64 playId, TimeMs frameMs, TimeMs systemMs) { +TimeMs Mixer::getVideoCorrectedTime(uint64 playId, TimeMs frameMs, TimeMs systemMs) { auto result = frameMs; QMutexLocker videoLock(&_lastVideoMutex); @@ -683,11 +782,11 @@ TimeMs AudioPlayer::getVideoCorrectedTime(uint64 playId, TimeMs frameMs, TimeMs return result; } -void 
AudioPlayer::videoSoundProgress(const AudioMsgId &audio) { +void Mixer::videoSoundProgress(const AudioMsgId &audio) { auto type = audio.type(); t_assert(type == AudioMsgId::Type::Video); - QMutexLocker lock(&playerMutex); + QMutexLocker lock(&AudioMutex); QMutexLocker videoLock(&_lastVideoMutex); auto current = dataForType(type); @@ -701,8 +800,8 @@ void AudioPlayer::videoSoundProgress(const AudioMsgId &audio) { } } -bool AudioPlayer::checkCurrentALError(AudioMsgId::Type type) { - if (_checkALError()) return true; +bool Mixer::checkCurrentALError(AudioMsgId::Type type) { + if (!Media::Player::PlaybackErrorHappened()) return true; auto data = dataForType(type); if (!data) { @@ -712,8 +811,8 @@ bool AudioPlayer::checkCurrentALError(AudioMsgId::Type type) { return false; } -void AudioPlayer::pauseresume(AudioMsgId::Type type, bool fast) { - QMutexLocker lock(&playerMutex); +void Mixer::pauseresume(AudioMsgId::Type type, bool fast) { + QMutexLocker lock(&AudioMutex); auto current = dataForType(type); float64 suppressGain = 1.; @@ -742,8 +841,6 @@ void AudioPlayer::pauseresume(AudioMsgId::Type type, bool fast) { if (!checkCurrentALError(type)) return; if (state != AL_PLAYING) { - audioPlayer()->resumeDevice(); - alSourcef(current->source, AL_GAIN, suppressGain); if (!checkCurrentALError(type)) return; @@ -755,17 +852,17 @@ void AudioPlayer::pauseresume(AudioMsgId::Type type, bool fast) { case AudioPlayerStarting: case AudioPlayerResuming: case AudioPlayerPlaying: - current->playbackState.state = AudioPlayerPausing; - updateCurrentStarted(type); - if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); + current->playbackState.state = AudioPlayerPausing; + updateCurrentStarted(type); + if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); break; case AudioPlayerFinishing: current->playbackState.state = AudioPlayerPausing; break; } emit faderOnTimer(); } -void AudioPlayer::seek(int64 position) { - QMutexLocker lock(&playerMutex); +void Mixer::seek(int64 
position) { + QMutexLocker lock(&AudioMutex); auto type = AudioMsgId::Type::Song; auto current = dataForType(type); @@ -801,25 +898,25 @@ void AudioPlayer::seek(int64 position) { case AudioPlayerStarting: case AudioPlayerResuming: case AudioPlayerPlaying: - current->playbackState.state = AudioPlayerPausing; - updateCurrentStarted(type); - if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); - break; + current->playbackState.state = AudioPlayerPausing; + updateCurrentStarted(type); + if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); + break; case AudioPlayerFinishing: case AudioPlayerStopped: case AudioPlayerStoppedAtEnd: case AudioPlayerStoppedAtError: case AudioPlayerStoppedAtStart: - lock.unlock(); - return play(audio, position); + lock.unlock(); + return play(audio, position); } emit faderOnTimer(); } -void AudioPlayer::stop(AudioMsgId::Type type) { +void Mixer::stop(AudioMsgId::Type type) { AudioMsgId current; { - QMutexLocker lock(&playerMutex); + QMutexLocker lock(&AudioMutex); auto data = dataForType(type); t_assert(data != nullptr); @@ -832,10 +929,10 @@ void AudioPlayer::stop(AudioMsgId::Type type) { if (current) emit updated(current); } -void AudioPlayer::stopAndClear() { +void Mixer::stopAndClear() { AudioMsg *current_audio = nullptr, *current_song = nullptr; { - QMutexLocker lock(&playerMutex); + QMutexLocker lock(&AudioMutex); if ((current_audio = dataForType(AudioMsgId::Type::Voice))) { setStoppedState(current_audio); } @@ -850,7 +947,7 @@ void AudioPlayer::stopAndClear() { emit updated(current_audio->audio); } { - QMutexLocker lock(&playerMutex); + QMutexLocker lock(&AudioMutex); auto clearAndCancel = [this](AudioMsgId::Type type, int index) { auto data = dataForType(type, index); if (data->audio) { @@ -867,16 +964,16 @@ void AudioPlayer::stopAndClear() { } } -AudioPlaybackState AudioPlayer::currentVideoState(uint64 videoPlayId) { - QMutexLocker lock(&playerMutex); +AudioPlaybackState Mixer::currentVideoState(uint64 videoPlayId) { + 
QMutexLocker lock(&AudioMutex); auto current = dataForType(AudioMsgId::Type::Video); if (!current || current->videoPlayId != videoPlayId) return AudioPlaybackState(); return current->playbackState; } -AudioPlaybackState AudioPlayer::currentState(AudioMsgId *audio, AudioMsgId::Type type) { - QMutexLocker lock(&playerMutex); +AudioPlaybackState Mixer::currentState(AudioMsgId *audio, AudioMsgId::Type type) { + QMutexLocker lock(&AudioMutex); auto current = dataForType(type); if (!current) return AudioPlaybackState(); @@ -884,80 +981,20 @@ AudioPlaybackState AudioPlayer::currentState(AudioMsgId *audio, AudioMsgId::Type return current->playbackState; } -void AudioPlayer::setStoppedState(AudioMsg *current, AudioPlayerState state) { +void Mixer::setStoppedState(AudioMsg *current, AudioPlayerState state) { current->playbackState.state = state; current->playbackState.position = 0; } -void AudioPlayer::clearStoppedAtStart(const AudioMsgId &audio) { - QMutexLocker lock(&playerMutex); +void Mixer::clearStoppedAtStart(const AudioMsgId &audio) { + QMutexLocker lock(&AudioMutex); auto data = dataForType(audio.type()); if (data && data->audio == audio && data->playbackState.state == AudioPlayerStoppedAtStart) { setStoppedState(data); } } -void AudioPlayer::resumeDevice() { - _fader->resumeDevice(); -} - - -namespace internal { - -QMutex *audioPlayerMutex() { - return &playerMutex; -} - -float64 audioSuppressGain() { - return suppressAllGain; -} - -float64 audioSuppressSongGain() { - return suppressSongGain; -} - -bool audioCheckError() { - return _checkALError(); -} - -} // namespace internal - -AudioCapture::AudioCapture() : _capture(new AudioCaptureInner(&_captureThread)) { - connect(this, SIGNAL(start()), _capture, SLOT(onStart())); - connect(this, SIGNAL(stop(bool)), _capture, SLOT(onStop(bool))); - connect(_capture, SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SIGNAL(done(QByteArray,VoiceWaveform,qint32))); - connect(_capture, SIGNAL(updated(quint16,qint32)), this, 
SIGNAL(updated(quint16,qint32))); - connect(_capture, SIGNAL(error()), this, SIGNAL(error())); - connect(&_captureThread, SIGNAL(started()), _capture, SLOT(onInit())); - connect(&_captureThread, SIGNAL(finished()), _capture, SLOT(deleteLater())); - _captureThread.start(); -} - -bool AudioCapture::check() { - if (auto defaultDevice = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) { - if (auto device = alcCaptureOpenDevice(defaultDevice, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5)) { - alcCaptureCloseDevice(device); - return _checkALCError(); - } - } - return false; -} - -AudioCapture::~AudioCapture() { - capture = nullptr; - _captureThread.quit(); - _captureThread.wait(); -} - -AudioPlayer *audioPlayer() { - return player; -} - -AudioCapture *audioCapture() { - return capture; -} - -AudioPlayerFader::AudioPlayerFader(QThread *thread) : QObject() +Fader::Fader(QThread *thread) : QObject() , _timer(this) , _suppressAllGain(1., 1.) , _suppressSongGain(1., 1.) 
{ @@ -975,20 +1012,21 @@ AudioPlayerFader::AudioPlayerFader(QThread *thread) : QObject() connect(this, SIGNAL(stopPauseDevice()), this, SLOT(onPauseTimerStop()), Qt::QueuedConnection); } -void AudioPlayerFader::onInit() { +void Fader::onInit() { } -void AudioPlayerFader::onTimer() { - QMutexLocker lock(&playerMutex); - AudioPlayer *voice = audioPlayer(); - if (!voice) return; +void Fader::onTimer() { + QMutexLocker lock(&AudioMutex); + auto player = mixer(); + if (!player) return; bool suppressAudioChanged = false, suppressSongChanged = false; if (_suppressAll || _suppressSongAnim) { auto ms = getms(); - float64 wasSong = suppressSongGain; + auto wasSong = suppressSongGain; if (_suppressAll) { - float64 wasAudio = suppressAllGain; + auto notifyLengthMs = Media::Player::DefaultNotify.lengthMs; + auto wasAudio = suppressAllGain; if (ms >= _suppressAllStart + notifyLengthMs || ms < _suppressAllStart) { _suppressAll = _suppressAllAnim = false; _suppressAllGain = anim::value(1., 1.); @@ -1020,8 +1058,8 @@ void AudioPlayerFader::onTimer() { bool hasFading = (_suppressAll || _suppressSongAnim); bool hasPlaying = false; - auto updatePlayback = [this, voice, &hasPlaying, &hasFading](AudioMsgId::Type type, int index, float64 suppressGain, bool suppressGainChanged) { - auto data = voice->dataForType(type, index); + auto updatePlayback = [this, player, &hasPlaying, &hasFading](AudioMsgId::Type type, int index, float64 suppressGain, bool suppressGainChanged) { + auto data = player->dataForType(type, index); if ((data->playbackState.state & AudioPlayerStoppedMask) || data->playbackState.state == AudioPlayerPaused || !data->source) return; int32 emitSignals = updateOnePlayback(data, hasPlaying, hasFading, suppressGain, suppressGainChanged); @@ -1042,14 +1080,8 @@ void AudioPlayerFader::onTimer() { _songVolumeChanged = _videoVolumeChanged = false; - if (!hasFading) { - if (!hasPlaying) { - ALint state = AL_INITIAL; - alGetSourcei(notifySource, AL_SOURCE_STATE, &state); - if 
(_checkALError() && state == AL_PLAYING) { - hasPlaying = true; - } - } + if (!hasFading && !hasPlaying && Media::Player::NotifyIsPlaying()) { + hasPlaying = true; } if (hasFading) { _timer.start(AudioFadeTimeout); @@ -1064,15 +1096,15 @@ void AudioPlayerFader::onTimer() { } } -int32 AudioPlayerFader::updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged) { +int32 Fader::updateOnePlayback(Mixer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged) { bool playing = false, fading = false; ALint pos = 0; ALint state = AL_INITIAL; alGetSourcei(m->source, AL_SAMPLE_OFFSET, &pos); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } alGetSourcei(m->source, AL_SOURCE_STATE, &state); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } int32 emitSignals = 0; switch (m->playbackState.state) { @@ -1080,20 +1112,20 @@ int32 AudioPlayerFader::updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPla case AudioPlayerPausing: case AudioPlayerStarting: case AudioPlayerResuming: - fading = true; - break; + fading = true; + break; case AudioPlayerPlaying: - playing = true; - break; + playing = true; + break; } if (fading && (state == AL_PLAYING || !m->loading)) { if (state != AL_PLAYING) { fading = false; if (m->source) { alSourceStop(m->source); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } alSourcef(m->source, AL_GAIN, 1); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return 
EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } } if (m->playbackState.state == AudioPlayerPausing) { m->playbackState.state = AudioPlayerPausedAtEnd; @@ -1104,24 +1136,24 @@ int32 AudioPlayerFader::updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPla } else if (1000 * (pos + m->skipStart - m->started) >= AudioFadeDuration * m->playbackState.frequency) { fading = false; alSourcef(m->source, AL_GAIN, 1. * suppressGain); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } switch (m->playbackState.state) { case AudioPlayerFinishing: - alSourceStop(m->source); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - setStoppedState(m); - state = AL_STOPPED; - break; + alSourceStop(m->source); + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + setStoppedState(m); + state = AL_STOPPED; + break; case AudioPlayerPausing: - alSourcePause(m->source); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - m->playbackState.state = AudioPlayerPaused; - break; + alSourcePause(m->source); + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + m->playbackState.state = AudioPlayerPaused; + break; case AudioPlayerStarting: case AudioPlayerResuming: - m->playbackState.state = AudioPlayerPlaying; - playing = true; - break; + m->playbackState.state = AudioPlayerPlaying; + playing = true; + break; } } else { float64 newGain = 1000. * (pos + m->skipStart - m->started) / (AudioFadeDuration * m->playbackState.frequency); @@ -1129,22 +1161,22 @@ int32 AudioPlayerFader::updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPla newGain = 1. 
- newGain; } alSourcef(m->source, AL_GAIN, newGain * suppressGain); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } } } else if (playing && (state == AL_PLAYING || !m->loading)) { if (state != AL_PLAYING) { playing = false; if (m->source) { alSourceStop(m->source); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } alSourcef(m->source, AL_GAIN, 1); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } } setStoppedState(m, AudioPlayerStoppedAtEnd); emitSignals |= EmitStopped; } else if (suppressGainChanged) { alSourcef(m->source, AL_GAIN, suppressGain); - if (!_checkALError()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } } } if (state == AL_PLAYING && pos + m->skipStart - m->playbackState.position >= AudioCheckPositionDelta) { @@ -1163,24 +1195,24 @@ int32 AudioPlayerFader::updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPla return emitSignals; } -void AudioPlayerFader::setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state) { +void Fader::setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state) { m->playbackState.state = state; m->playbackState.position = 0; } -void AudioPlayerFader::onPauseTimer() { +void Fader::onPauseTimer() { QMutexLocker lock(&_pauseMutex); if (_pauseFlag) { _paused = true; - alcDevicePauseSOFT(audioDevice); + alcDevicePauseSOFT(AudioDevice); } } -void AudioPlayerFader::onPauseTimerStop() { +void Fader::onPauseTimerStop() { if 
(_pauseTimer.isActive()) _pauseTimer.stop(); } -void AudioPlayerFader::onSuppressSong() { +void Fader::onSuppressSong() { if (!_suppressSong) { _suppressSong = true; _suppressSongAnim = true; @@ -1190,7 +1222,7 @@ void AudioPlayerFader::onSuppressSong() { } } -void AudioPlayerFader::onUnsuppressSong() { +void Fader::onUnsuppressSong() { if (_suppressSong) { _suppressSong = false; _suppressSongAnim = true; @@ -1200,657 +1232,55 @@ void AudioPlayerFader::onUnsuppressSong() { } } -void AudioPlayerFader::onSuppressAll() { +void Fader::onSuppressAll() { _suppressAll = true; _suppressAllStart = getms(); _suppressAllGain.start(st::suppressAll); onTimer(); } -void AudioPlayerFader::onSongVolumeChanged() { +void Fader::onSongVolumeChanged() { _songVolumeChanged = true; onTimer(); } -void AudioPlayerFader::onVideoVolumeChanged() { +void Fader::onVideoVolumeChanged() { _videoVolumeChanged = true; onTimer(); } -void AudioPlayerFader::resumeDevice() { +void Fader::resumeDevice() { QMutexLocker lock(&_pauseMutex); _pauseFlag = false; emit stopPauseDevice(); if (_paused) { _paused = false; - alcDeviceResumeSOFT(audioDevice); + alcDeviceResumeSOFT(AudioDevice); } } -struct AudioCapturePrivate { - AudioCapturePrivate() - : device(0) - , fmt(0) - , ioBuffer(0) - , ioContext(0) - , fmtContext(0) - , stream(0) - , codec(0) - , codecContext(0) - , opened(false) - , srcSamples(0) - , dstSamples(0) - , maxDstSamples(0) - , dstSamplesSize(0) - , fullSamples(0) - , srcSamplesData(0) - , dstSamplesData(0) - , swrContext(0) - , lastUpdate(0) - , levelMax(0) - , dataPos(0) - , waveformMod(0) - , waveformEach(AudioVoiceMsgFrequency / 100) - , waveformPeak(0) { - } - ALCdevice *device; - AVOutputFormat *fmt; - uchar *ioBuffer; - AVIOContext *ioContext; - AVFormatContext *fmtContext; - AVStream *stream; - AVCodec *codec; - AVCodecContext *codecContext; - bool opened; +} // namespace Player +} // namespace Media - int32 srcSamples, dstSamples, maxDstSamples, dstSamplesSize, fullSamples; - uint8_t 
**srcSamplesData, **dstSamplesData; - SwrContext *swrContext; +namespace internal { - int32 lastUpdate; - uint16 levelMax; - - QByteArray data; - int32 dataPos; - - int64 waveformMod, waveformEach; - uint16 waveformPeak; - QVector waveform; - - static int _read_data(void *opaque, uint8_t *buf, int buf_size) { - AudioCapturePrivate *l = reinterpret_cast(opaque); - - int32 nbytes = qMin(l->data.size() - l->dataPos, int32(buf_size)); - if (nbytes <= 0) { - return 0; - } - - memcpy(buf, l->data.constData() + l->dataPos, nbytes); - l->dataPos += nbytes; - return nbytes; - } - - static int _write_data(void *opaque, uint8_t *buf, int buf_size) { - AudioCapturePrivate *l = reinterpret_cast(opaque); - - if (buf_size <= 0) return 0; - if (l->dataPos + buf_size > l->data.size()) l->data.resize(l->dataPos + buf_size); - memcpy(l->data.data() + l->dataPos, buf, buf_size); - l->dataPos += buf_size; - return buf_size; - } - - static int64_t _seek_data(void *opaque, int64_t offset, int whence) { - AudioCapturePrivate *l = reinterpret_cast(opaque); - - int32 newPos = -1; - switch (whence) { - case SEEK_SET: newPos = offset; break; - case SEEK_CUR: newPos = l->dataPos + offset; break; - case SEEK_END: newPos = l->data.size() + offset; break; - } - if (newPos < 0) { - return -1; - } - l->dataPos = newPos; - return l->dataPos; - } -}; - -AudioCaptureInner::AudioCaptureInner(QThread *thread) : d(new AudioCapturePrivate()) { - moveToThread(thread); - _timer.moveToThread(thread); - connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimeout())); +QMutex *audioPlayerMutex() { + return &AudioMutex; } -AudioCaptureInner::~AudioCaptureInner() { - onStop(false); - delete d; +float64 audioSuppressGain() { + return suppressAllGain; } -void AudioCaptureInner::onInit() { +float64 audioSuppressSongGain() { + return suppressSongGain; } -void AudioCaptureInner::onStart() { - - // Start OpenAL Capture - const ALCchar *dName = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER); - DEBUG_LOG(("Audio 
Info: Capture device name '%1'").arg(dName)); - d->device = alcCaptureOpenDevice(dName, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5); - if (!d->device) { - LOG(("Audio Error: capture device not present!")); - emit error(); - return; - } - alcCaptureStart(d->device); - if (!_checkCaptureError(d->device)) { - alcCaptureCloseDevice(d->device); - d->device = 0; - emit error(); - return; - } - - // Create encoding context - - d->ioBuffer = (uchar*)av_malloc(AVBlockSize); - - d->ioContext = avio_alloc_context(d->ioBuffer, AVBlockSize, 1, static_cast(d), &AudioCapturePrivate::_read_data, &AudioCapturePrivate::_write_data, &AudioCapturePrivate::_seek_data); - int res = 0; - char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; - AVOutputFormat *fmt = 0; - while ((fmt = av_oformat_next(fmt))) { - if (fmt->name == qstr("opus")) { - break; - } - } - if (!fmt) { - LOG(("Audio Error: Unable to find opus AVOutputFormat for capture")); - onStop(false); - emit error(); - return; - } - - if ((res = avformat_alloc_output_context2(&d->fmtContext, fmt, 0, 0)) < 0) { - LOG(("Audio Error: Unable to avformat_alloc_output_context2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - d->fmtContext->pb = d->ioContext; - d->fmtContext->flags |= AVFMT_FLAG_CUSTOM_IO; - d->opened = true; - - // Add audio stream - d->codec = avcodec_find_encoder(fmt->audio_codec); - if (!d->codec) { - LOG(("Audio Error: Unable to avcodec_find_encoder for capture")); - onStop(false); - emit error(); - return; - } - d->stream = avformat_new_stream(d->fmtContext, d->codec); - if (!d->stream) { - LOG(("Audio Error: Unable to avformat_new_stream for capture")); - onStop(false); - emit error(); - return; - } - d->stream->id = d->fmtContext->nb_streams - 1; - d->codecContext = avcodec_alloc_context3(d->codec); - if (!d->codecContext) { - LOG(("Audio Error: Unable to avcodec_alloc_context3 for capture")); - onStop(false); 
- emit error(); - return; - } - - av_opt_set_int(d->codecContext, "refcounted_frames", 1, 0); - - d->codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP; - d->codecContext->bit_rate = 64000; - d->codecContext->channel_layout = AV_CH_LAYOUT_MONO; - d->codecContext->sample_rate = AudioVoiceMsgFrequency; - d->codecContext->channels = 1; - - if (d->fmtContext->oformat->flags & AVFMT_GLOBALHEADER) { - d->codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER; - } - - // Open audio stream - if ((res = avcodec_open2(d->codecContext, d->codec, nullptr)) < 0) { - LOG(("Audio Error: Unable to avcodec_open2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - - // Alloc source samples - - d->srcSamples = (d->codecContext->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) ? 10000 : d->codecContext->frame_size; - //if ((res = av_samples_alloc_array_and_samples(&d->srcSamplesData, 0, d->codecContext->channels, d->srcSamples, d->codecContext->sample_fmt, 0)) < 0) { - // LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - // onStop(false); - // emit error(); - // return; - //} - // Using _captured directly - - // Prepare resampling - d->swrContext = swr_alloc(); - if (!d->swrContext) { - fprintf(stderr, "Could not allocate resampler context\n"); - exit(1); - } - - av_opt_set_int(d->swrContext, "in_channel_count", d->codecContext->channels, 0); - av_opt_set_int(d->swrContext, "in_sample_rate", d->codecContext->sample_rate, 0); - av_opt_set_sample_fmt(d->swrContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); - av_opt_set_int(d->swrContext, "out_channel_count", d->codecContext->channels, 0); - av_opt_set_int(d->swrContext, "out_sample_rate", d->codecContext->sample_rate, 0); - av_opt_set_sample_fmt(d->swrContext, "out_sample_fmt", d->codecContext->sample_fmt, 0); - - if ((res = swr_init(d->swrContext)) < 0) { - 
LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - - d->maxDstSamples = d->srcSamples; - if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) { - LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0); - - if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) { - LOG(("Audio Error: Unable to avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - - // Write file header - if ((res = avformat_write_header(d->fmtContext, 0)) < 0) { - LOG(("Audio Error: Unable to avformat_write_header for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - - _timer.start(50); - _captured.clear(); - _captured.reserve(AudioVoiceMsgBufferSize); - DEBUG_LOG(("Audio Capture: started!")); +bool audioCheckError() { + return !Media::Player::PlaybackErrorHappened(); } -void AudioCaptureInner::onStop(bool needResult) { - if (!_timer.isActive()) return; // in onStop() already - _timer.stop(); - - if (d->device) { - alcCaptureStop(d->device); - onTimeout(); // get last data - } - - // Write what is left - if (!_captured.isEmpty()) { - int32 fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000, capturedSamples = _captured.size() / sizeof(short); - if ((_captured.size() % sizeof(short)) || (d->fullSamples + capturedSamples < AudioVoiceMsgFrequency) || (capturedSamples < 
fadeSamples)) { - d->fullSamples = 0; - d->dataPos = 0; - d->data.clear(); - d->waveformMod = 0; - d->waveformPeak = 0; - d->waveform.clear(); - } else { - float64 coef = 1. / fadeSamples, fadedFrom = 0; - for (short *ptr = ((short*)_captured.data()) + capturedSamples, *end = ptr - fadeSamples; ptr != end; ++fadedFrom) { - --ptr; - *ptr = qRound(fadedFrom * coef * *ptr); - } - if (capturedSamples % d->srcSamples) { - int32 s = _captured.size(); - _captured.resize(s + (d->srcSamples - (capturedSamples % d->srcSamples)) * sizeof(short)); - memset(_captured.data() + s, 0, _captured.size() - s); - } - - int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0; - while (_captured.size() >= encoded + framesize) { - processFrame(encoded, framesize); - encoded += framesize; - } - writeFrame(nullptr); // drain the codec - if (encoded != _captured.size()) { - d->fullSamples = 0; - d->dataPos = 0; - d->data.clear(); - d->waveformMod = 0; - d->waveformPeak = 0; - d->waveform.clear(); - } - } - } - DEBUG_LOG(("Audio Capture: stopping (need result: %1), size: %2, samples: %3").arg(Logs::b(needResult)).arg(d->data.size()).arg(d->fullSamples)); - _captured = QByteArray(); - - // Finish stream - if (d->device) { - av_write_trailer(d->fmtContext); - } - - QByteArray result = d->fullSamples ? 
d->data : QByteArray(); - VoiceWaveform waveform; - qint32 samples = d->fullSamples; - if (samples && !d->waveform.isEmpty()) { - int64 count = d->waveform.size(), sum = 0; - if (count >= WaveformSamplesCount) { - QVector peaks; - peaks.reserve(WaveformSamplesCount); - - uint16 peak = 0; - for (int32 i = 0; i < count; ++i) { - uint16 sample = uint16(d->waveform.at(i)) * 256; - if (peak < sample) { - peak = sample; - } - sum += WaveformSamplesCount; - if (sum >= count) { - sum -= count; - peaks.push_back(peak); - peak = 0; - } - } - - int64 sum = std::accumulate(peaks.cbegin(), peaks.cend(), 0ULL); - peak = qMax(int32(sum * 1.8 / peaks.size()), 2500); - - waveform.resize(peaks.size()); - for (int32 i = 0, l = peaks.size(); i != l; ++i) { - waveform[i] = char(qMin(31U, uint32(qMin(peaks.at(i), peak)) * 31 / peak)); - } - } - } - if (d->device) { - alcCaptureStop(d->device); - alcCaptureCloseDevice(d->device); - d->device = nullptr; - - if (d->codecContext) { - avcodec_free_context(&d->codecContext); - d->codecContext = nullptr; - } - if (d->srcSamplesData) { - if (d->srcSamplesData[0]) { - av_freep(&d->srcSamplesData[0]); - } - av_freep(&d->srcSamplesData); - } - if (d->dstSamplesData) { - if (d->dstSamplesData[0]) { - av_freep(&d->dstSamplesData[0]); - } - av_freep(&d->dstSamplesData); - } - d->fullSamples = 0; - if (d->swrContext) { - swr_free(&d->swrContext); - d->swrContext = nullptr; - } - if (d->opened) { - avformat_close_input(&d->fmtContext); - d->opened = false; - } - if (d->ioContext) { - av_freep(&d->ioContext->buffer); - av_freep(&d->ioContext); - d->ioBuffer = nullptr; - } else if (d->ioBuffer) { - av_freep(&d->ioBuffer); - } - if (d->fmtContext) { - avformat_free_context(d->fmtContext); - d->fmtContext = nullptr; - } - d->fmt = nullptr; - d->stream = nullptr; - d->codec = nullptr; - - d->lastUpdate = 0; - d->levelMax = 0; - - d->dataPos = 0; - d->data.clear(); - - d->waveformMod = 0; - d->waveformPeak = 0; - d->waveform.clear(); - } - if (needResult) 
emit done(result, waveform, samples); -} - -void AudioCaptureInner::onTimeout() { - if (!d->device) { - _timer.stop(); - return; - } - ALint samples; - alcGetIntegerv(d->device, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples); - if (!_checkCaptureError(d->device)) { - onStop(false); - emit error(); - return; - } - if (samples > 0) { - // Get samples from OpenAL - int32 s = _captured.size(), news = s + samples * sizeof(short); - if (news / AudioVoiceMsgBufferSize > s / AudioVoiceMsgBufferSize) { - _captured.reserve(((news / AudioVoiceMsgBufferSize) + 1) * AudioVoiceMsgBufferSize); - } - _captured.resize(news); - alcCaptureSamples(d->device, (ALCvoid *)(_captured.data() + s), samples); - if (!_checkCaptureError(d->device)) { - onStop(false); - emit error(); - return; - } - - // Count new recording level and update view - int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000; - int32 levelindex = d->fullSamples + (s / sizeof(short)); - for (const short *ptr = (const short*)(_captured.constData() + s), *end = (const short*)(_captured.constData() + news); ptr < end; ++ptr, ++levelindex) { - if (levelindex > skipSamples) { - uint16 value = qAbs(*ptr); - if (levelindex < skipSamples + fadeSamples) { - value = qRound(value * float64(levelindex - skipSamples) / fadeSamples); - } - if (d->levelMax < value) { - d->levelMax = value; - } - } - } - qint32 samplesFull = d->fullSamples + _captured.size() / sizeof(short), samplesSinceUpdate = samplesFull - d->lastUpdate; - if (samplesSinceUpdate > AudioVoiceMsgUpdateView * AudioVoiceMsgFrequency / 1000) { - emit updated(d->levelMax, samplesFull); - d->lastUpdate = samplesFull; - d->levelMax = 0; - } - // Write frames - int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0; - while (uint32(_captured.size()) >= encoded + framesize + fadeSamples * sizeof(short)) { - processFrame(encoded, framesize); - encoded += 
framesize; - } - - // Collapse the buffer - if (encoded > 0) { - int32 goodSize = _captured.size() - encoded; - memmove(_captured.data(), _captured.constData() + encoded, goodSize); - _captured.resize(goodSize); - } - } else { - DEBUG_LOG(("Audio Capture: no samples to capture.")); - } -} - -void AudioCaptureInner::processFrame(int32 offset, int32 framesize) { - // Prepare audio frame - - if (framesize % sizeof(short)) { // in the middle of a sample - LOG(("Audio Error: Bad framesize in writeFrame() for capture, framesize %1, %2").arg(framesize)); - onStop(false); - emit error(); - return; - } - int32 samplesCnt = framesize / sizeof(short); - - int res = 0; - char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; - - auto srcSamplesDataChannel = (short*)(_captured.data() + offset); - auto srcSamplesData = &srcSamplesDataChannel; - -// memcpy(d->srcSamplesData[0], _captured.constData() + offset, framesize); - int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000; - if (d->fullSamples < skipSamples + fadeSamples) { - int32 fadedCnt = qMin(samplesCnt, skipSamples + fadeSamples - d->fullSamples); - float64 coef = 1. 
/ fadeSamples, fadedFrom = d->fullSamples - skipSamples; - short *ptr = srcSamplesDataChannel, *zeroEnd = ptr + qMin(samplesCnt, qMax(0, skipSamples - d->fullSamples)), *end = ptr + fadedCnt; - for (; ptr != zeroEnd; ++ptr, ++fadedFrom) { - *ptr = 0; - } - for (; ptr != end; ++ptr, ++fadedFrom) { - *ptr = qRound(fadedFrom * coef * *ptr); - } - } - - d->waveform.reserve(d->waveform.size() + (samplesCnt / d->waveformEach) + 1); - for (short *ptr = srcSamplesDataChannel, *end = ptr + samplesCnt; ptr != end; ++ptr) { - uint16 value = qAbs(*ptr); - if (d->waveformPeak < value) { - d->waveformPeak = value; - } - if (++d->waveformMod == d->waveformEach) { - d->waveformMod -= d->waveformEach; - d->waveform.push_back(uchar(d->waveformPeak / 256)); - d->waveformPeak = 0; - } - } - - // Convert to final format - - d->dstSamples = av_rescale_rnd(swr_get_delay(d->swrContext, d->codecContext->sample_rate) + d->srcSamples, d->codecContext->sample_rate, d->codecContext->sample_rate, AV_ROUND_UP); - if (d->dstSamples > d->maxDstSamples) { - d->maxDstSamples = d->dstSamples; - av_freep(&d->dstSamplesData[0]); - if ((res = av_samples_alloc(d->dstSamplesData, 0, d->codecContext->channels, d->dstSamples, d->codecContext->sample_fmt, 1)) < 0) { - LOG(("Audio Error: Unable to av_samples_alloc for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0); - } - - if ((res = swr_convert(d->swrContext, d->dstSamplesData, d->dstSamples, (const uint8_t **)srcSamplesData, d->srcSamples)) < 0) { - LOG(("Audio Error: Unable to swr_convert for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - - // Write audio frame - - AVFrame *frame = av_frame_alloc(); - - frame->nb_samples = d->dstSamples; - frame->pts = 
av_rescale_q(d->fullSamples, AVRational{1, d->codecContext->sample_rate}, d->codecContext->time_base); - - avcodec_fill_audio_frame(frame, d->codecContext->channels, d->codecContext->sample_fmt, d->dstSamplesData[0], d->dstSamplesSize, 0); - - writeFrame(frame); - - d->fullSamples += samplesCnt; - - av_frame_free(&frame); -} - -void AudioCaptureInner::writeFrame(AVFrame *frame) { - int res = 0; - char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; - - res = avcodec_send_frame(d->codecContext, frame); - if (res == AVERROR(EAGAIN)) { - int packetsWritten = writePackets(); - if (packetsWritten < 0) { - if (frame && packetsWritten == AVERROR_EOF) { - LOG(("Audio Error: EOF in packets received when EAGAIN was got in avcodec_send_frame()")); - onStop(false); - emit error(); - } - return; - } else if (!packetsWritten) { - LOG(("Audio Error: No packets received when EAGAIN was got in avcodec_send_frame()")); - onStop(false); - emit error(); - return; - } - res = avcodec_send_frame(d->codecContext, frame); - } - if (res < 0) { - LOG(("Audio Error: Unable to avcodec_send_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return; - } - - if (!frame) { // drain - if ((res = writePackets()) != AVERROR_EOF) { - LOG(("Audio Error: not EOF in packets received when draining the codec, result %1").arg(res)); - onStop(false); - emit error(); - } - } -} - -int AudioCaptureInner::writePackets() { - AVPacket pkt; - memset(&pkt, 0, sizeof(pkt)); // data and size must be 0; - - int res = 0; - char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; - - int written = 0; - do { - av_init_packet(&pkt); - if ((res = avcodec_receive_packet(d->codecContext, &pkt)) < 0) { - if (res == AVERROR(EAGAIN)) { - return written; - } else if (res == AVERROR_EOF) { - return res; - } - LOG(("Audio Error: Unable to avcodec_receive_packet for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit 
error(); - return res; - } - - av_packet_rescale_ts(&pkt, d->codecContext->time_base, d->stream->time_base); - pkt.stream_index = d->stream->index; - if ((res = av_interleaved_write_frame(d->fmtContext, &pkt)) < 0) { - LOG(("Audio Error: Unable to av_interleaved_write_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); - onStop(false); - emit error(); - return -1; - } - - ++written; - av_packet_unref(&pkt); - } while (true); - return written; -} +} // namespace internal class FFMpegAttributesReader : public AbstractFFMpegLoader { public: diff --git a/Telegram/SourceFiles/media/media_audio.h b/Telegram/SourceFiles/media/media_audio.h index 398d18d1bd..6bd24ae814 100644 --- a/Telegram/SourceFiles/media/media_audio.h +++ b/Telegram/SourceFiles/media/media_audio.h @@ -20,13 +20,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org */ #pragma once -#include "core/basic_types.h" - -void audioInit(); -bool audioWorks(); -void audioPlayNotify(); -void audioFinish(); - enum AudioPlayerState { AudioPlayerStopped = 0x01, AudioPlayerStoppedAtEnd = 0x02, @@ -43,9 +36,6 @@ enum AudioPlayerState { AudioPlayerResuming = 0x38, }; -class AudioPlayerFader; -class AudioPlayerLoaders; - struct VideoSoundData; struct VideoSoundPart; struct AudioPlaybackState { @@ -55,11 +45,25 @@ struct AudioPlaybackState { int32 frequency = 0; }; -class AudioPlayer : public QObject, public base::Observable, private base::Subscriber { +namespace Media { +namespace Player { + +void InitAudio(); +void DeInitAudio(); + +base::Observable &Updated(); +bool CreateAudioPlaybackDevice(); + +void PlayNotify(); + +class Fader; +class Loaders; + +class Mixer : public QObject, private base::Subscriber { Q_OBJECT public: - AudioPlayer(); + Mixer(); void play(const AudioMsgId &audio, int64 position = 0); void pauseresume(AudioMsgId::Type type, bool fast = false); @@ -81,11 +85,9 @@ public: void clearStoppedAtStart(const AudioMsgId &audio); - void 
resumeDevice(); + ~Mixer(); - ~AudioPlayer(); - -private slots: + private slots: void onError(const AudioMsgId &audio); void onStopped(const AudioMsgId &audio); @@ -161,60 +163,22 @@ private: QMutex _mutex; - friend class AudioPlayerFader; - friend class AudioPlayerLoaders; + friend class Fader; + friend class Loaders; QThread _faderThread, _loaderThread; - AudioPlayerFader *_fader; - AudioPlayerLoaders *_loader; + Fader *_fader; + Loaders *_loader; }; -namespace internal { +Mixer *mixer(); -QMutex *audioPlayerMutex(); -float64 audioSuppressGain(); -float64 audioSuppressSongGain(); -bool audioCheckError(); - -} // namespace internal - -class AudioCaptureInner; - -class AudioCapture : public QObject { +class Fader : public QObject { Q_OBJECT public: - AudioCapture(); - - bool check(); - - ~AudioCapture(); - -signals: - void start(); - void stop(bool needResult); - - void done(QByteArray data, VoiceWaveform waveform, qint32 samples); - void updated(quint16 level, qint32 samples); - void error(); - -private: - friend class AudioCaptureInner; - - QThread _captureThread; - AudioCaptureInner *_capture; - -}; - -AudioPlayer *audioPlayer(); -AudioCapture *audioCapture(); - -class AudioPlayerFader : public QObject { - Q_OBJECT - -public: - AudioPlayerFader(QThread *thread); + Fader(QThread *thread); void resumeDevice(); signals: @@ -244,8 +208,8 @@ private: EmitPositionUpdated = 0x04, EmitNeedToPreload = 0x08, }; - int32 updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged); - void setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped); + int32 updateOnePlayback(Mixer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged); + void setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped); QTimer _timer, _pauseTimer; QMutex _pauseMutex; @@ -263,42 +227,17 @@ private: }; -struct AudioCapturePrivate; -struct 
AVFrame; +} // namespace Player +} // namespace Media -class AudioCaptureInner : public QObject { - Q_OBJECT +namespace internal { -public: - AudioCaptureInner(QThread *thread); - ~AudioCaptureInner(); +QMutex *audioPlayerMutex(); +float64 audioSuppressGain(); +float64 audioSuppressSongGain(); +bool audioCheckError(); -signals: - void error(); - void updated(quint16 level, qint32 samples); - void done(QByteArray data, VoiceWaveform waveform, qint32 samples); - -public slots: - void onInit(); - void onStart(); - void onStop(bool needResult); - - void onTimeout(); - -private: - void processFrame(int32 offset, int32 framesize); - - void writeFrame(AVFrame *frame); - - // Writes the packets till EAGAIN is got from av_receive_packet() - // Returns number of packets written or -1 on error - int writePackets(); - - AudioCapturePrivate *d; - QTimer _timer; - QByteArray _captured; - -}; +} // namespace internal MTPDocumentAttribute audioReadSongAttributes(const QString &fname, const QByteArray &data, QImage &cover, QByteArray &coverBytes, QByteArray &coverFormat); VoiceWaveform audioCountWaveform(const FileLocation &file, const QByteArray &data); diff --git a/Telegram/SourceFiles/media/media_audio_capture.cpp b/Telegram/SourceFiles/media/media_audio_capture.cpp new file mode 100644 index 0000000000..d1ad976e19 --- /dev/null +++ b/Telegram/SourceFiles/media/media_audio_capture.cpp @@ -0,0 +1,700 @@ +/* +This file is part of Telegram Desktop, +the official desktop version of Telegram messaging app, see https://telegram.org + +Telegram Desktop is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +It is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +In addition, as a special exception, the copyright holders give permission +to link the code of portions of this program with the OpenSSL library. + +Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE +Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org +*/ +#include "stdafx.h" +#include "media/media_audio_capture.h" + +#include "media/media_audio_ffmpeg_loader.h" + +#include +#include + +#define AL_ALEXT_PROTOTYPES +#include + +namespace Media { + +namespace Capture { +namespace { + +Instance *CaptureInstance = nullptr; + +bool ErrorHappened(ALCdevice *device) { + ALenum errCode; + if ((errCode = alcGetError(device)) != ALC_NO_ERROR) { + LOG(("Audio Capture Error: %1, %2").arg(errCode).arg((const char *)alcGetString(device, errCode))); + return true; + } + return false; +} + +} // namespace + +void Init() { + t_assert(CaptureInstance == nullptr); + CaptureInstance = new Instance(); + instance()->check(); +} + +void DeInit() { + delete base::take(CaptureInstance); +} + +Instance::Instance() : _inner(new Inner(&_thread)) { + CaptureInstance = this; + connect(this, SIGNAL(start()), _inner, SLOT(onStart())); + connect(this, SIGNAL(stop(bool)), _inner, SLOT(onStop(bool))); + connect(_inner, SIGNAL(done(QByteArray, VoiceWaveform, qint32)), this, SIGNAL(done(QByteArray, VoiceWaveform, qint32))); + connect(_inner, SIGNAL(updated(quint16, qint32)), this, SIGNAL(updated(quint16, qint32))); + connect(_inner, SIGNAL(error()), this, SIGNAL(error())); + connect(&_thread, SIGNAL(started()), _inner, SLOT(onInit())); + connect(&_thread, SIGNAL(finished()), _inner, SLOT(deleteLater())); + _thread.start(); +} + +void Instance::check() { + _available = false; + if (auto defaultDevice = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) { + if (auto device = alcCaptureOpenDevice(defaultDevice, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5)) { + auto error = 
ErrorHappened(device); + alcCaptureCloseDevice(device); + _available = !error; + } + } +} + +Instance::~Instance() { + _inner = nullptr; + _thread.quit(); + _thread.wait(); +} + +Instance *instance() { + return CaptureInstance; +} + +struct Instance::Inner::Private { + ALCdevice *device = nullptr; + AVOutputFormat *fmt = nullptr; + uchar *ioBuffer = nullptr; + AVIOContext *ioContext = nullptr; + AVFormatContext *fmtContext = nullptr; + AVStream *stream = nullptr; + AVCodec *codec = nullptr; + AVCodecContext *codecContext = nullptr; + bool opened = false; + + int srcSamples = 0; + int dstSamples = 0; + int maxDstSamples = 0; + int dstSamplesSize = 0; + int fullSamples = 0; + uint8_t **srcSamplesData = nullptr; + uint8_t **dstSamplesData = nullptr; + SwrContext *swrContext = nullptr; + + int32 lastUpdate = 0; + uint16 levelMax = 0; + + QByteArray data; + int32 dataPos = 0; + + int64 waveformMod = 0; + int64 waveformEach = (AudioVoiceMsgFrequency / 100); + uint16 waveformPeak = 0; + QVector waveform; + + static int _read_data(void *opaque, uint8_t *buf, int buf_size) { + auto l = reinterpret_cast(opaque); + + int32 nbytes = qMin(l->data.size() - l->dataPos, int32(buf_size)); + if (nbytes <= 0) { + return 0; + } + + memcpy(buf, l->data.constData() + l->dataPos, nbytes); + l->dataPos += nbytes; + return nbytes; + } + + static int _write_data(void *opaque, uint8_t *buf, int buf_size) { + auto l = reinterpret_cast(opaque); + + if (buf_size <= 0) return 0; + if (l->dataPos + buf_size > l->data.size()) l->data.resize(l->dataPos + buf_size); + memcpy(l->data.data() + l->dataPos, buf, buf_size); + l->dataPos += buf_size; + return buf_size; + } + + static int64_t _seek_data(void *opaque, int64_t offset, int whence) { + auto l = reinterpret_cast(opaque); + + int32 newPos = -1; + switch (whence) { + case SEEK_SET: newPos = offset; break; + case SEEK_CUR: newPos = l->dataPos + offset; break; + case SEEK_END: newPos = l->data.size() + offset; break; + } + if (newPos < 0) { + 
return -1; + } + l->dataPos = newPos; + return l->dataPos; + } +}; + +Instance::Inner::Inner(QThread *thread) : d(new Private()) { + moveToThread(thread); + _timer.moveToThread(thread); + connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimeout())); +} + +Instance::Inner::~Inner() { + onStop(false); + delete d; +} + +void Instance::Inner::onInit() { +} + +void Instance::Inner::onStart() { + + // Start OpenAL Capture + const ALCchar *dName = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER); + DEBUG_LOG(("Audio Info: Capture device name '%1'").arg(dName)); + d->device = alcCaptureOpenDevice(dName, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5); + if (!d->device) { + LOG(("Audio Error: capture device not present!")); + emit error(); + return; + } + alcCaptureStart(d->device); + if (ErrorHappened(d->device)) { + alcCaptureCloseDevice(d->device); + d->device = nullptr; + emit error(); + return; + } + + // Create encoding context + + d->ioBuffer = (uchar*)av_malloc(AVBlockSize); + + d->ioContext = avio_alloc_context(d->ioBuffer, AVBlockSize, 1, static_cast(d), &Private::_read_data, &Private::_write_data, &Private::_seek_data); + int res = 0; + char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + AVOutputFormat *fmt = 0; + while ((fmt = av_oformat_next(fmt))) { + if (fmt->name == qstr("opus")) { + break; + } + } + if (!fmt) { + LOG(("Audio Error: Unable to find opus AVOutputFormat for capture")); + onStop(false); + emit error(); + return; + } + + if ((res = avformat_alloc_output_context2(&d->fmtContext, fmt, 0, 0)) < 0) { + LOG(("Audio Error: Unable to avformat_alloc_output_context2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + d->fmtContext->pb = d->ioContext; + d->fmtContext->flags |= AVFMT_FLAG_CUSTOM_IO; + d->opened = true; + + // Add audio stream + d->codec = avcodec_find_encoder(fmt->audio_codec); + if (!d->codec) { + LOG(("Audio Error: Unable to 
avcodec_find_encoder for capture")); + onStop(false); + emit error(); + return; + } + d->stream = avformat_new_stream(d->fmtContext, d->codec); + if (!d->stream) { + LOG(("Audio Error: Unable to avformat_new_stream for capture")); + onStop(false); + emit error(); + return; + } + d->stream->id = d->fmtContext->nb_streams - 1; + d->codecContext = avcodec_alloc_context3(d->codec); + if (!d->codecContext) { + LOG(("Audio Error: Unable to avcodec_alloc_context3 for capture")); + onStop(false); + emit error(); + return; + } + + av_opt_set_int(d->codecContext, "refcounted_frames", 1, 0); + + d->codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP; + d->codecContext->bit_rate = 64000; + d->codecContext->channel_layout = AV_CH_LAYOUT_MONO; + d->codecContext->sample_rate = AudioVoiceMsgFrequency; + d->codecContext->channels = 1; + + if (d->fmtContext->oformat->flags & AVFMT_GLOBALHEADER) { + d->codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + + // Open audio stream + if ((res = avcodec_open2(d->codecContext, d->codec, nullptr)) < 0) { + LOG(("Audio Error: Unable to avcodec_open2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + + // Alloc source samples + + d->srcSamples = (d->codecContext->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) ? 
10000 : d->codecContext->frame_size; + //if ((res = av_samples_alloc_array_and_samples(&d->srcSamplesData, 0, d->codecContext->channels, d->srcSamples, d->codecContext->sample_fmt, 0)) < 0) { + // LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + // onStop(false); + // emit error(); + // return; + //} + // Using _captured directly + + // Prepare resampling + d->swrContext = swr_alloc(); + if (!d->swrContext) { + fprintf(stderr, "Could not allocate resampler context\n"); + exit(1); + } + + av_opt_set_int(d->swrContext, "in_channel_count", d->codecContext->channels, 0); + av_opt_set_int(d->swrContext, "in_sample_rate", d->codecContext->sample_rate, 0); + av_opt_set_sample_fmt(d->swrContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int(d->swrContext, "out_channel_count", d->codecContext->channels, 0); + av_opt_set_int(d->swrContext, "out_sample_rate", d->codecContext->sample_rate, 0); + av_opt_set_sample_fmt(d->swrContext, "out_sample_fmt", d->codecContext->sample_fmt, 0); + + if ((res = swr_init(d->swrContext)) < 0) { + LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + + d->maxDstSamples = d->srcSamples; + if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) { + LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0); + + if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) { + LOG(("Audio Error: Unable to 
avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + + // Write file header + if ((res = avformat_write_header(d->fmtContext, 0)) < 0) { + LOG(("Audio Error: Unable to avformat_write_header for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + + _timer.start(50); + _captured.clear(); + _captured.reserve(AudioVoiceMsgBufferSize); + DEBUG_LOG(("Audio Capture: started!")); +} + +void Instance::Inner::onStop(bool needResult) { + if (!_timer.isActive()) return; // in onStop() already + _timer.stop(); + + if (d->device) { + alcCaptureStop(d->device); + onTimeout(); // get last data + } + + // Write what is left + if (!_captured.isEmpty()) { + int32 fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000, capturedSamples = _captured.size() / sizeof(short); + if ((_captured.size() % sizeof(short)) || (d->fullSamples + capturedSamples < AudioVoiceMsgFrequency) || (capturedSamples < fadeSamples)) { + d->fullSamples = 0; + d->dataPos = 0; + d->data.clear(); + d->waveformMod = 0; + d->waveformPeak = 0; + d->waveform.clear(); + } else { + float64 coef = 1. 
/ fadeSamples, fadedFrom = 0; + for (short *ptr = ((short*)_captured.data()) + capturedSamples, *end = ptr - fadeSamples; ptr != end; ++fadedFrom) { + --ptr; + *ptr = qRound(fadedFrom * coef * *ptr); + } + if (capturedSamples % d->srcSamples) { + int32 s = _captured.size(); + _captured.resize(s + (d->srcSamples - (capturedSamples % d->srcSamples)) * sizeof(short)); + memset(_captured.data() + s, 0, _captured.size() - s); + } + + int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0; + while (_captured.size() >= encoded + framesize) { + processFrame(encoded, framesize); + encoded += framesize; + } + writeFrame(nullptr); // drain the codec + if (encoded != _captured.size()) { + d->fullSamples = 0; + d->dataPos = 0; + d->data.clear(); + d->waveformMod = 0; + d->waveformPeak = 0; + d->waveform.clear(); + } + } + } + DEBUG_LOG(("Audio Capture: stopping (need result: %1), size: %2, samples: %3").arg(Logs::b(needResult)).arg(d->data.size()).arg(d->fullSamples)); + _captured = QByteArray(); + + // Finish stream + if (d->device) { + av_write_trailer(d->fmtContext); + } + + QByteArray result = d->fullSamples ? 
d->data : QByteArray(); + VoiceWaveform waveform; + qint32 samples = d->fullSamples; + if (samples && !d->waveform.isEmpty()) { + int64 count = d->waveform.size(), sum = 0; + if (count >= WaveformSamplesCount) { + QVector peaks; + peaks.reserve(WaveformSamplesCount); + + uint16 peak = 0; + for (int32 i = 0; i < count; ++i) { + uint16 sample = uint16(d->waveform.at(i)) * 256; + if (peak < sample) { + peak = sample; + } + sum += WaveformSamplesCount; + if (sum >= count) { + sum -= count; + peaks.push_back(peak); + peak = 0; + } + } + + int64 sum = std::accumulate(peaks.cbegin(), peaks.cend(), 0ULL); + peak = qMax(int32(sum * 1.8 / peaks.size()), 2500); + + waveform.resize(peaks.size()); + for (int32 i = 0, l = peaks.size(); i != l; ++i) { + waveform[i] = char(qMin(31U, uint32(qMin(peaks.at(i), peak)) * 31 / peak)); + } + } + } + if (d->device) { + alcCaptureStop(d->device); + alcCaptureCloseDevice(d->device); + d->device = nullptr; + + if (d->codecContext) { + avcodec_free_context(&d->codecContext); + d->codecContext = nullptr; + } + if (d->srcSamplesData) { + if (d->srcSamplesData[0]) { + av_freep(&d->srcSamplesData[0]); + } + av_freep(&d->srcSamplesData); + } + if (d->dstSamplesData) { + if (d->dstSamplesData[0]) { + av_freep(&d->dstSamplesData[0]); + } + av_freep(&d->dstSamplesData); + } + d->fullSamples = 0; + if (d->swrContext) { + swr_free(&d->swrContext); + d->swrContext = nullptr; + } + if (d->opened) { + avformat_close_input(&d->fmtContext); + d->opened = false; + } + if (d->ioContext) { + av_freep(&d->ioContext->buffer); + av_freep(&d->ioContext); + d->ioBuffer = nullptr; + } else if (d->ioBuffer) { + av_freep(&d->ioBuffer); + } + if (d->fmtContext) { + avformat_free_context(d->fmtContext); + d->fmtContext = nullptr; + } + d->fmt = nullptr; + d->stream = nullptr; + d->codec = nullptr; + + d->lastUpdate = 0; + d->levelMax = 0; + + d->dataPos = 0; + d->data.clear(); + + d->waveformMod = 0; + d->waveformPeak = 0; + d->waveform.clear(); + } + if (needResult) 
emit done(result, waveform, samples); +} + +void Instance::Inner::onTimeout() { + if (!d->device) { + _timer.stop(); + return; + } + ALint samples; + alcGetIntegerv(d->device, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples); + if (ErrorHappened(d->device)) { + onStop(false); + emit error(); + return; + } + if (samples > 0) { + // Get samples from OpenAL + int32 s = _captured.size(), news = s + samples * sizeof(short); + if (news / AudioVoiceMsgBufferSize > s / AudioVoiceMsgBufferSize) { + _captured.reserve(((news / AudioVoiceMsgBufferSize) + 1) * AudioVoiceMsgBufferSize); + } + _captured.resize(news); + alcCaptureSamples(d->device, (ALCvoid *)(_captured.data() + s), samples); + if (ErrorHappened(d->device)) { + onStop(false); + emit error(); + return; + } + + // Count new recording level and update view + int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000; + int32 levelindex = d->fullSamples + (s / sizeof(short)); + for (const short *ptr = (const short*)(_captured.constData() + s), *end = (const short*)(_captured.constData() + news); ptr < end; ++ptr, ++levelindex) { + if (levelindex > skipSamples) { + uint16 value = qAbs(*ptr); + if (levelindex < skipSamples + fadeSamples) { + value = qRound(value * float64(levelindex - skipSamples) / fadeSamples); + } + if (d->levelMax < value) { + d->levelMax = value; + } + } + } + qint32 samplesFull = d->fullSamples + _captured.size() / sizeof(short), samplesSinceUpdate = samplesFull - d->lastUpdate; + if (samplesSinceUpdate > AudioVoiceMsgUpdateView * AudioVoiceMsgFrequency / 1000) { + emit updated(d->levelMax, samplesFull); + d->lastUpdate = samplesFull; + d->levelMax = 0; + } + // Write frames + int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0; + while (uint32(_captured.size()) >= encoded + framesize + fadeSamples * sizeof(short)) { + processFrame(encoded, framesize); + encoded += framesize; + } + + // 
Collapse the buffer + if (encoded > 0) { + int32 goodSize = _captured.size() - encoded; + memmove(_captured.data(), _captured.constData() + encoded, goodSize); + _captured.resize(goodSize); + } + } else { + DEBUG_LOG(("Audio Capture: no samples to capture.")); + } +} + +void Instance::Inner::processFrame(int32 offset, int32 framesize) { + // Prepare audio frame + + if (framesize % sizeof(short)) { // in the middle of a sample + LOG(("Audio Error: Bad framesize in processFrame() for capture, framesize %1").arg(framesize)); + onStop(false); + emit error(); + return; + } + int32 samplesCnt = framesize / sizeof(short); + + int res = 0; + char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + + auto srcSamplesDataChannel = (short*)(_captured.data() + offset); + auto srcSamplesData = &srcSamplesDataChannel; + + // memcpy(d->srcSamplesData[0], _captured.constData() + offset, framesize); + int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000; + if (d->fullSamples < skipSamples + fadeSamples) { + int32 fadedCnt = qMin(samplesCnt, skipSamples + fadeSamples - d->fullSamples); + float64 coef = 1.
/ fadeSamples, fadedFrom = d->fullSamples - skipSamples; + short *ptr = srcSamplesDataChannel, *zeroEnd = ptr + qMin(samplesCnt, qMax(0, skipSamples - d->fullSamples)), *end = ptr + fadedCnt; + for (; ptr != zeroEnd; ++ptr, ++fadedFrom) { + *ptr = 0; + } + for (; ptr != end; ++ptr, ++fadedFrom) { + *ptr = qRound(fadedFrom * coef * *ptr); + } + } + + d->waveform.reserve(d->waveform.size() + (samplesCnt / d->waveformEach) + 1); + for (short *ptr = srcSamplesDataChannel, *end = ptr + samplesCnt; ptr != end; ++ptr) { + uint16 value = qAbs(*ptr); + if (d->waveformPeak < value) { + d->waveformPeak = value; + } + if (++d->waveformMod == d->waveformEach) { + d->waveformMod -= d->waveformEach; + d->waveform.push_back(uchar(d->waveformPeak / 256)); + d->waveformPeak = 0; + } + } + + // Convert to final format + + d->dstSamples = av_rescale_rnd(swr_get_delay(d->swrContext, d->codecContext->sample_rate) + d->srcSamples, d->codecContext->sample_rate, d->codecContext->sample_rate, AV_ROUND_UP); + if (d->dstSamples > d->maxDstSamples) { + d->maxDstSamples = d->dstSamples; + av_freep(&d->dstSamplesData[0]); + if ((res = av_samples_alloc(d->dstSamplesData, 0, d->codecContext->channels, d->dstSamples, d->codecContext->sample_fmt, 1)) < 0) { + LOG(("Audio Error: Unable to av_samples_alloc for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0); + } + + if ((res = swr_convert(d->swrContext, d->dstSamplesData, d->dstSamples, (const uint8_t **)srcSamplesData, d->srcSamples)) < 0) { + LOG(("Audio Error: Unable to swr_convert for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + + // Write audio frame + + AVFrame *frame = av_frame_alloc(); + + frame->nb_samples = d->dstSamples; + frame->pts = 
av_rescale_q(d->fullSamples, AVRational { 1, d->codecContext->sample_rate }, d->codecContext->time_base); + + avcodec_fill_audio_frame(frame, d->codecContext->channels, d->codecContext->sample_fmt, d->dstSamplesData[0], d->dstSamplesSize, 0); + + writeFrame(frame); + + d->fullSamples += samplesCnt; + + av_frame_free(&frame); +} + +void Instance::Inner::writeFrame(AVFrame *frame) { + int res = 0; + char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + + res = avcodec_send_frame(d->codecContext, frame); + if (res == AVERROR(EAGAIN)) { + int packetsWritten = writePackets(); + if (packetsWritten < 0) { + if (frame && packetsWritten == AVERROR_EOF) { + LOG(("Audio Error: EOF in packets received when EAGAIN was got in avcodec_send_frame()")); + onStop(false); + emit error(); + } + return; + } else if (!packetsWritten) { + LOG(("Audio Error: No packets received when EAGAIN was got in avcodec_send_frame()")); + onStop(false); + emit error(); + return; + } + res = avcodec_send_frame(d->codecContext, frame); + } + if (res < 0) { + LOG(("Audio Error: Unable to avcodec_send_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return; + } + + if (!frame) { // drain + if ((res = writePackets()) != AVERROR_EOF) { + LOG(("Audio Error: not EOF in packets received when draining the codec, result %1").arg(res)); + onStop(false); + emit error(); + } + } +} + +int Instance::Inner::writePackets() { + AVPacket pkt; + memset(&pkt, 0, sizeof(pkt)); // data and size must be 0; + + int res = 0; + char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + + int written = 0; + do { + av_init_packet(&pkt); + if ((res = avcodec_receive_packet(d->codecContext, &pkt)) < 0) { + if (res == AVERROR(EAGAIN)) { + return written; + } else if (res == AVERROR_EOF) { + return res; + } + LOG(("Audio Error: Unable to avcodec_receive_packet for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit 
error(); + return res; + } + + av_packet_rescale_ts(&pkt, d->codecContext->time_base, d->stream->time_base); + pkt.stream_index = d->stream->index; + if ((res = av_interleaved_write_frame(d->fmtContext, &pkt)) < 0) { + LOG(("Audio Error: Unable to av_interleaved_write_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res))); + onStop(false); + emit error(); + return -1; + } + + ++written; + av_packet_unref(&pkt); + } while (true); + return written; +} + +} // namespace Capture +} // namespace Media diff --git a/Telegram/SourceFiles/media/media_audio_capture.h b/Telegram/SourceFiles/media/media_audio_capture.h new file mode 100644 index 0000000000..05c05b6713 --- /dev/null +++ b/Telegram/SourceFiles/media/media_audio_capture.h @@ -0,0 +1,101 @@ +/* +This file is part of Telegram Desktop, +the official desktop version of Telegram messaging app, see https://telegram.org + +Telegram Desktop is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +It is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +In addition, as a special exception, the copyright holders give permission +to link the code of portions of this program with the OpenSSL library. 
+ +Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE +Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org +*/ +#pragma once + +struct AVFrame; + +namespace Media { +namespace Capture { + +void Init(); +void DeInit(); + +class Instance : public QObject { + Q_OBJECT + +public: + Instance(); + + void check(); + bool available() const { + return _available; + } + + ~Instance(); + +signals: + void start(); + void stop(bool needResult); + + void done(QByteArray data, VoiceWaveform waveform, qint32 samples); + void updated(quint16 level, qint32 samples); + void error(); + +private: + class Inner; + friend class Inner; + + bool _available = false; + QThread _thread; + Inner *_inner; + +}; + +Instance *instance(); + +class Instance::Inner : public QObject { + Q_OBJECT + +public: + Inner(QThread *thread); + ~Inner(); + +signals: + void error(); + void updated(quint16 level, qint32 samples); + void done(QByteArray data, VoiceWaveform waveform, qint32 samples); + +public slots: + void onInit(); + void onStart(); + void onStop(bool needResult); + + void onTimeout(); + +private: + void processFrame(int32 offset, int32 framesize); + + void writeFrame(AVFrame *frame); + + // Writes the packets till EAGAIN is got from av_receive_packet() + // Returns number of packets written or -1 on error + int writePackets(); + + struct Private; + Private *d; + QTimer _timer; + QByteArray _captured; + +}; + +} // namespace Capture +} // namespace Media + diff --git a/Telegram/SourceFiles/media/media_audio_loaders.cpp b/Telegram/SourceFiles/media/media_audio_loaders.cpp index 91ff99c548..075a85f41f 100644 --- a/Telegram/SourceFiles/media/media_audio_loaders.cpp +++ b/Telegram/SourceFiles/media/media_audio_loaders.cpp @@ -25,13 +25,16 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include "media/media_audio_ffmpeg_loader.h" #include "media/media_child_ffmpeg_loader.h" -AudioPlayerLoaders::AudioPlayerLoaders(QThread *thread) : 
_fromVideoNotify(this, "onVideoSoundAdded") { +namespace Media { +namespace Player { + +Loaders::Loaders(QThread *thread) : _fromVideoNotify(this, "onVideoSoundAdded") { moveToThread(thread); connect(thread, SIGNAL(started()), this, SLOT(onInit())); connect(thread, SIGNAL(finished()), this, SLOT(deleteLater())); } -void AudioPlayerLoaders::feedFromVideo(VideoSoundPart &&part) { +void Loaders::feedFromVideo(VideoSoundPart &&part) { bool invoke = false; { QMutexLocker lock(&_fromVideoMutex); @@ -47,17 +50,17 @@ void AudioPlayerLoaders::feedFromVideo(VideoSoundPart &&part) { } } -void AudioPlayerLoaders::startFromVideo(uint64 videoPlayId) { +void Loaders::startFromVideo(uint64 videoPlayId) { QMutexLocker lock(&_fromVideoMutex); _fromVideoPlayId = videoPlayId; clearFromVideoQueue(); } -void AudioPlayerLoaders::stopFromVideo() { +void Loaders::stopFromVideo() { startFromVideo(0); } -void AudioPlayerLoaders::onVideoSoundAdded() { +void Loaders::onVideoSoundAdded() { bool waitingAndAdded = false; { QMutexLocker lock(&_fromVideoMutex); @@ -71,12 +74,12 @@ void AudioPlayerLoaders::onVideoSoundAdded() { } } -AudioPlayerLoaders::~AudioPlayerLoaders() { +Loaders::~Loaders() { QMutexLocker lock(&_fromVideoMutex); clearFromVideoQueue(); } -void AudioPlayerLoaders::clearFromVideoQueue() { +void Loaders::clearFromVideoQueue() { auto queue = base::take(_fromVideoQueue); for (auto &packetData : queue) { AVPacket packet; @@ -85,18 +88,17 @@ void AudioPlayerLoaders::clearFromVideoQueue() { } } -void AudioPlayerLoaders::onInit() { +void Loaders::onInit() { } -void AudioPlayerLoaders::onStart(const AudioMsgId &audio, qint64 position) { +void Loaders::onStart(const AudioMsgId &audio, qint64 position) { auto type = audio.type(); clear(type); { QMutexLocker lock(internal::audioPlayerMutex()); - AudioPlayer *voice = audioPlayer(); - if (!voice) return; + if (!mixer()) return; - auto data = voice->dataForType(type); + auto data = mixer()->dataForType(type); if (!data) return; data->loading = 
true; @@ -105,7 +107,7 @@ void AudioPlayerLoaders::onStart(const AudioMsgId &audio, qint64 position) { loadData(audio, position); } -AudioMsgId AudioPlayerLoaders::clear(AudioMsgId::Type type) { +AudioMsgId Loaders::clear(AudioMsgId::Type type) { AudioMsgId result; switch (type) { case AudioMsgId::Type::Voice: std::swap(result, _audio); _audioLoader = nullptr; break; @@ -115,20 +117,20 @@ AudioMsgId AudioPlayerLoaders::clear(AudioMsgId::Type type) { return result; } -void AudioPlayerLoaders::setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state) { +void Loaders::setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state) { m->playbackState.state = state; m->playbackState.position = 0; } -void AudioPlayerLoaders::emitError(AudioMsgId::Type type) { +void Loaders::emitError(AudioMsgId::Type type) { emit error(clear(type)); } -void AudioPlayerLoaders::onLoad(const AudioMsgId &audio) { +void Loaders::onLoad(const AudioMsgId &audio) { loadData(audio, 0); } -void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) { +void Loaders::loadData(AudioMsgId audio, qint64 position) { SetupError err = SetupNoErrorStarted; auto type = audio.type(); AudioPlayerLoader *l = setupLoader(audio, err, position); @@ -156,7 +158,7 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) { if (errAtStart) { { QMutexLocker lock(internal::audioPlayerMutex()); - AudioPlayer::AudioMsg *m = checkLoader(type); + auto m = checkLoader(type); if (m) m->playbackState.state = AudioPlayerStoppedAtStart; } emitError(type); @@ -185,7 +187,7 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) { } QMutexLocker lock(internal::audioPlayerMutex()); - AudioPlayer::AudioMsg *m = checkLoader(type); + auto m = checkLoader(type); if (!m) { clear(type); return; @@ -294,8 +296,6 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) { alGetSourcei(m->source, AL_SOURCE_STATE, &state); if (internal::audioCheckError()) { if (state != AL_PLAYING) 
{ - audioPlayer()->resumeDevice(); - switch (type) { case AudioMsgId::Type::Voice: alSourcef(m->source, AL_GAIN, internal::audioSuppressGain()); break; case AudioMsgId::Type::Song: alSourcef(m->source, AL_GAIN, internal::audioSuppressSongGain() * Global::SongVolume()); break; @@ -323,13 +323,12 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) { } } -AudioPlayerLoader *AudioPlayerLoaders::setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position) { +AudioPlayerLoader *Loaders::setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position) { err = SetupErrorAtStart; QMutexLocker lock(internal::audioPlayerMutex()); - AudioPlayer *voice = audioPlayer(); - if (!voice) return nullptr; + if (!mixer()) return nullptr; - auto data = voice->dataForType(audio.type()); + auto data = mixer()->dataForType(audio.type()); if (!data || data->audio != audio || !data->loading) { emit error(audio); LOG(("Audio Error: trying to load part of audio, that is not current at the moment")); @@ -395,11 +394,10 @@ AudioPlayerLoader *AudioPlayerLoaders::setupLoader(const AudioMsgId &audio, Setu return l; } -AudioPlayer::AudioMsg *AudioPlayerLoaders::checkLoader(AudioMsgId::Type type) { - AudioPlayer *voice = audioPlayer(); - if (!voice) return 0; +Mixer::AudioMsg *Loaders::checkLoader(AudioMsgId::Type type) { + if (!mixer()) return nullptr; - auto data = voice->dataForType(type); + auto data = mixer()->dataForType(type); bool isGoodId = false; AudioPlayerLoader *l = nullptr; switch (type) { @@ -417,7 +415,7 @@ AudioPlayer::AudioMsg *AudioPlayerLoaders::checkLoader(AudioMsgId::Type type) { return data; } -void AudioPlayerLoaders::onCancel(const AudioMsgId &audio) { +void Loaders::onCancel(const AudioMsgId &audio) { switch (audio.type()) { case AudioMsgId::Type::Voice: if (_audio == audio) clear(audio.type()); break; case AudioMsgId::Type::Song: if (_song == audio) clear(audio.type()); break; @@ -425,13 +423,15 @@ void 
AudioPlayerLoaders::onCancel(const AudioMsgId &audio) { } QMutexLocker lock(internal::audioPlayerMutex()); - AudioPlayer *voice = audioPlayer(); - if (!voice) return; + if (!mixer()) return; for (int i = 0; i < AudioSimultaneousLimit; ++i) { - auto data = voice->dataForType(audio.type(), i); + auto data = mixer()->dataForType(audio.type(), i); if (data->audio == audio) { data->loading = false; } } } + +} // namespace Player +} // namespace Media diff --git a/Telegram/SourceFiles/media/media_audio_loaders.h b/Telegram/SourceFiles/media/media_audio_loaders.h index 1a3ff78d2a..4daec64471 100644 --- a/Telegram/SourceFiles/media/media_audio_loaders.h +++ b/Telegram/SourceFiles/media/media_audio_loaders.h @@ -26,21 +26,25 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org class AudioPlayerLoader; class ChildFFMpegLoader; -class AudioPlayerLoaders : public QObject { + +namespace Media { +namespace Player { + +class Loaders : public QObject { Q_OBJECT public: - AudioPlayerLoaders(QThread *thread); + Loaders(QThread *thread); void startFromVideo(uint64 videoPlayId); void stopFromVideo(); void feedFromVideo(VideoSoundPart &&part); - ~AudioPlayerLoaders(); + ~Loaders(); signals: void error(const AudioMsgId &audio); void needToCheck(); -public slots: + public slots: void onInit(); void onStart(const AudioMsgId &audio, qint64 position); @@ -64,7 +68,7 @@ private: void emitError(AudioMsgId::Type type); AudioMsgId clear(AudioMsgId::Type type); - void setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped); + void setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped); enum SetupError { SetupErrorAtStart = 0, @@ -74,6 +78,9 @@ private: }; void loadData(AudioMsgId audio, qint64 position); AudioPlayerLoader *setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position); - AudioPlayer::AudioMsg *checkLoader(AudioMsgId::Type type); + Mixer::AudioMsg *checkLoader(AudioMsgId::Type type); }; + +} // 
namespace Player +} // namespace Media diff --git a/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp b/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp index 3b6f160ddd..ffa2007498 100644 --- a/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp +++ b/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp @@ -187,7 +187,7 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readFramesTill(Time } // sync by audio stream - auto correctMs = (frameMs >= 0) ? audioPlayer()->getVideoCorrectedTime(_playId, frameMs, systemMs) : frameMs; + auto correctMs = (frameMs >= 0) ? Player::mixer()->getVideoCorrectedTime(_playId, frameMs, systemMs) : frameMs; if (!_frameRead) { auto readResult = readNextFrame(); if (readResult != ReadResult::Success) { @@ -221,13 +221,13 @@ TimeMs FFMpegReaderImplementation::durationMs() const { void FFMpegReaderImplementation::pauseAudio() { if (_audioStreamId >= 0) { - audioPlayer()->pauseFromVideo(_playId); + Player::mixer()->pauseFromVideo(_playId); } } void FFMpegReaderImplementation::resumeAudio() { if (_audioStreamId >= 0) { - audioPlayer()->resumeFromVideo(_playId); + Player::mixer()->resumeFromVideo(_playId); } } @@ -371,7 +371,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) { _audioStreamId = av_find_best_stream(_fmtContext, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0); if (_mode == Mode::OnlyGifv) { if (_audioStreamId >= 0) { // should be no audio stream - _audioStreamId = -1; // do not attempt to access audioPlayer() + _audioStreamId = -1; // do not attempt to access mixer() return false; } if (dataSize() > AnimationInMemory) { @@ -380,7 +380,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) { if (_codecContext->codec_id != AV_CODEC_ID_H264) { return false; } - } else if (_mode == Mode::Silent || !audioPlayer() || !_playId) { + } else if (_mode == Mode::Silent || !Player::mixer() || !_playId) { _audioStreamId = -1; } @@ -437,7 +437,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs 
&positionMs) { if (_audioStreamId >= 0) { int64 position = (positionMs * soundData->frequency) / 1000LL; - audioPlayer()->initFromVideo(_playId, std_::move(soundData), position); + Player::mixer()->initFromVideo(_playId, std_::move(soundData), position); } if (readResult == PacketResult::Ok) { @@ -453,7 +453,7 @@ QString FFMpegReaderImplementation::logData() const { FFMpegReaderImplementation::~FFMpegReaderImplementation() { if (_audioStreamId >= 0) { - audioPlayer()->stopFromVideo(_playId); + Player::mixer()->stopFromVideo(_playId); } clearPacketQueue(); @@ -490,7 +490,7 @@ FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket( VideoSoundPart part; part.packet = &_packetNull; part.videoPlayId = _playId; - audioPlayer()->feedFromVideo(std_::move(part)); + Player::mixer()->feedFromVideo(std_::move(part)); } return PacketResult::EndOfFile; } @@ -516,7 +516,7 @@ void FFMpegReaderImplementation::processPacket(AVPacket *packet) { VideoSoundPart part; part.packet = packet; part.videoPlayId = _playId; - audioPlayer()->feedFromVideo(std_::move(part)); + Player::mixer()->feedFromVideo(std_::move(part)); } } else { av_packet_unref(packet); diff --git a/Telegram/SourceFiles/media/player/media_player_cover.cpp b/Telegram/SourceFiles/media/player/media_player_cover.cpp index 3186178641..2964782ff7 100644 --- a/Telegram/SourceFiles/media/player/media_player_cover.cpp +++ b/Telegram/SourceFiles/media/player/media_player_cover.cpp @@ -94,9 +94,7 @@ CoverWidget::CoverWidget(QWidget *parent) : TWidget(parent) handleSeekFinished(value); }); _playPause->setClickedCallback([this] { - if (exists()) { - instance()->playPauseCancelClicked(); - } + instance()->playPauseCancelClicked(); }); updateRepeatTrackIcon(); @@ -110,27 +108,24 @@ CoverWidget::CoverWidget(QWidget *parent) : TWidget(parent) Global::RefSongVolumeChanged().notify(); }); subscribe(Global::RefSongVolumeChanged(), [this] { updateVolumeToggleIcon(); }); - if (exists()) { - 
subscribe(instance()->repeatChangedNotifier(), [this] { - updateRepeatTrackIcon(); - }); - subscribe(instance()->playlistChangedNotifier(), [this] { - handlePlaylistUpdate(); - }); - subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) { - handleSongUpdate(e); - }); - subscribe(instance()->songChangedNotifier(), [this] { - handleSongChange(); - }); + subscribe(instance()->repeatChangedNotifier(), [this] { + updateRepeatTrackIcon(); + }); + subscribe(instance()->playlistChangedNotifier(), [this] { + handlePlaylistUpdate(); + }); + subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) { + handleSongUpdate(e); + }); + subscribe(instance()->songChangedNotifier(), [this] { handleSongChange(); - if (auto player = audioPlayer()) { - AudioMsgId playing; - auto playbackState = player->currentState(&playing, AudioMsgId::Type::Song); - handleSongUpdate(UpdatedEvent(&playing, &playbackState)); - _playPause->finishTransform(); - } - } + }); + handleSongChange(); + + AudioMsgId playing; + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); + handleSongUpdate(UpdatedEvent(&playing, &playbackState)); + _playPause->finishTransform(); } void CoverWidget::setPinCallback(ButtonCallback &&callback) { @@ -148,9 +143,7 @@ void CoverWidget::handleSeekProgress(float64 progress) { if (_seekPositionMs != positionMs) { _seekPositionMs = positionMs; updateTimeLabel(); - if (exists()) { - instance()->startSeeking(); - } + instance()->startSeeking(); } } @@ -161,14 +154,12 @@ void CoverWidget::handleSeekFinished(float64 progress) { _seekPositionMs = -1; AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing && playbackState.duration) { - audioPlayer()->seek(qRound(progress * playbackState.duration)); + Media::Player::mixer()->seek(qRound(progress * playbackState.duration)); } - if 
(exists()) { - instance()->stopSeeking(); - } + instance()->stopSeeking(); } void CoverWidget::resizeEvent(QResizeEvent *e) { @@ -252,7 +243,7 @@ void CoverWidget::handleSongUpdate(const UpdatedEvent &e) { auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing); auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - if (exists() && instance()->isSeeking()) { + if (instance()->isSeeking()) { showPause = true; } auto state = [audio = audioId.audio(), showPause] { @@ -350,16 +341,12 @@ void CoverWidget::createPrevNextButtons() { _previousTrack.create(this, st::mediaPlayerPanelPreviousButton); _previousTrack->show(); _previousTrack->setClickedCallback([this]() { - if (exists()) { - instance()->previous(); - } + instance()->previous(); }); _nextTrack.create(this, st::mediaPlayerPanelNextButton); _nextTrack->show(); _nextTrack->setClickedCallback([this]() { - if (exists()) { - instance()->next(); - } + instance()->next(); }); updatePlayPrevNextPositions(); } diff --git a/Telegram/SourceFiles/media/player/media_player_instance.cpp b/Telegram/SourceFiles/media/player/media_player_instance.cpp index 5ea049e3f4..3e813e100b 100644 --- a/Telegram/SourceFiles/media/player/media_player_instance.cpp +++ b/Telegram/SourceFiles/media/player/media_player_instance.cpp @@ -22,6 +22,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include "media/player/media_player_instance.h" #include "media/media_audio.h" +#include "media/media_audio_capture.h" #include "observer_peer.h" namespace Media { @@ -33,24 +34,21 @@ Instance *SingleInstance = nullptr; } // namespace void start() { - audioInit(); - if (audioPlayer()) { - SingleInstance = new Instance(); - } -} + InitAudio(); + Capture::Init(); -bool exists() { - return (audioPlayer() != nullptr); + SingleInstance = new Instance(); } void finish() { 
delete base::take(SingleInstance); - audioFinish(); + Capture::DeInit(); + DeInitAudio(); } Instance::Instance() { - subscribe(audioPlayer(), [this](const AudioMsgId &audioId) { + subscribe(Media::Player::Updated(), [this](const AudioMsgId &audioId) { if (audioId.type() == AudioMsgId::Type::Song) { handleSongUpdate(audioId); } @@ -153,17 +151,17 @@ Instance *instance() { void Instance::play() { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing) { if (playbackState.state & AudioPlayerStoppedMask) { - audioPlayer()->play(playing); + mixer()->play(playing); } else { if (playbackState.state == AudioPlayerPausing || playbackState.state == AudioPlayerPaused || playbackState.state == AudioPlayerPausedAtEnd) { - audioPlayer()->pauseresume(AudioMsgId::Type::Song); + mixer()->pauseresume(AudioMsgId::Type::Song); } } } else if (_current) { - audioPlayer()->play(_current); + mixer()->play(_current); } } @@ -171,7 +169,7 @@ void Instance::play(const AudioMsgId &audioId) { if (!audioId || !audioId.audio()->song()) { return; } - audioPlayer()->play(audioId); + mixer()->play(audioId); setCurrent(audioId); if (audioId.audio()->loading()) { documentLoadProgress(audioId.audio()); @@ -180,31 +178,31 @@ void Instance::play(const AudioMsgId &audioId) { void Instance::pause() { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing) { if (!(playbackState.state & AudioPlayerStoppedMask)) { if (playbackState.state == AudioPlayerStarting || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerFinishing) { - audioPlayer()->pauseresume(AudioMsgId::Type::Song); + mixer()->pauseresume(AudioMsgId::Type::Song); } } } } void Instance::stop() 
{ - audioPlayer()->stop(AudioMsgId::Type::Song); + mixer()->stop(AudioMsgId::Type::Song); } void Instance::playPause() { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing) { if (playbackState.state & AudioPlayerStoppedMask) { - audioPlayer()->play(playing); + mixer()->play(playing); } else { - audioPlayer()->pauseresume(AudioMsgId::Type::Song); + mixer()->pauseresume(AudioMsgId::Type::Song); } } else if (_current) { - audioPlayer()->play(_current); + mixer()->play(_current); } } @@ -222,7 +220,7 @@ void Instance::playPauseCancelClicked() { } AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing); auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); auto audio = playing.audio(); @@ -255,7 +253,7 @@ void Instance::documentLoadProgress(DocumentData *document) { template void Instance::emitUpdate(CheckCallback check) { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); if (!playing || !check(playing)) { return; } @@ -265,7 +263,7 @@ void Instance::emitUpdate(CheckCallback check) { if (_isPlaying && playbackState.state == AudioPlayerStoppedAtEnd) { if (_repeatEnabled) { - audioPlayer()->play(_current); + mixer()->play(_current); } else { next(); } diff --git a/Telegram/SourceFiles/media/player/media_player_instance.h b/Telegram/SourceFiles/media/player/media_player_instance.h index 962b2f7b80..d01937cef3 100644 --- 
a/Telegram/SourceFiles/media/player/media_player_instance.h +++ b/Telegram/SourceFiles/media/player/media_player_instance.h @@ -32,11 +32,6 @@ namespace Player { void start(); void finish(); -// We use this method instead of checking for instance() != nullptr -// because audioPlayer() can be destroyed at any time by an -// error in audio playback, so we check it each time. -bool exists(); - class Instance; Instance *instance(); diff --git a/Telegram/SourceFiles/media/player/media_player_list.cpp b/Telegram/SourceFiles/media/player/media_player_list.cpp index c6049d7b53..c52398941c 100644 --- a/Telegram/SourceFiles/media/player/media_player_list.cpp +++ b/Telegram/SourceFiles/media/player/media_player_list.cpp @@ -31,9 +31,7 @@ namespace Player { ListWidget::ListWidget(QWidget *parent) : TWidget(parent) { setMouseTracking(true); playlistUpdated(); - if (exists()) { - subscribe(instance()->playlistChangedNotifier(), [this] { playlistUpdated(); }); - } + subscribe(instance()->playlistChangedNotifier(), [this] { playlistUpdated(); }); subscribe(Global::RefItemRemoved(), [this](HistoryItem *item) { itemRemoved(item); }); @@ -158,17 +156,15 @@ void ListWidget::itemRemoved(HistoryItem *item) { } QRect ListWidget::getCurrentTrackGeometry() const { - if (exists()) { - auto top = marginTop(); - auto current = instance()->current(); - auto fullMsgId = current.contextId(); - for_const (auto layout, _list) { - auto layoutHeight = layout->height(); - if (layout->getItem()->fullId() == fullMsgId) { - return QRect(0, top, width(), layoutHeight); - } - top += layoutHeight; + auto top = marginTop(); + auto current = instance()->current(); + auto fullMsgId = current.contextId(); + for_const (auto layout, _list) { + auto layoutHeight = layout->height(); + if (layout->getItem()->fullId() == fullMsgId) { + return QRect(0, top, width(), layoutHeight); } + top += layoutHeight; } return QRect(0, height(), width(), 0); } @@ -188,8 +184,7 @@ int ListWidget::marginTop() const { void 
ListWidget::playlistUpdated() { auto newHeight = 0; - const QList emptyPlaylist; - auto &playlist = exists() ? instance()->playlist() : emptyPlaylist; + auto &playlist = instance()->playlist(); auto playlistSize = playlist.size(); auto existingSize = _list.size(); if (playlistSize > existingSize) { diff --git a/Telegram/SourceFiles/media/player/media_player_widget.cpp b/Telegram/SourceFiles/media/player/media_player_widget.cpp index 6621e27281..d37cb07b02 100644 --- a/Telegram/SourceFiles/media/player/media_player_widget.cpp +++ b/Telegram/SourceFiles/media/player/media_player_widget.cpp @@ -107,9 +107,7 @@ Widget::Widget(QWidget *parent) : TWidget(parent) handleSeekFinished(value); }); _playPause->setClickedCallback([this] { - if (exists()) { - instance()->playPauseCancelClicked(); - } + instance()->playPauseCancelClicked(); }); updateVolumeToggleIcon(); @@ -124,27 +122,24 @@ Widget::Widget(QWidget *parent) : TWidget(parent) instance()->toggleRepeat(); }); - if (exists()) { - subscribe(instance()->repeatChangedNotifier(), [this] { - updateRepeatTrackIcon(); - }); - subscribe(instance()->playlistChangedNotifier(), [this] { - handlePlaylistUpdate(); - }); - subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) { - handleSongUpdate(e); - }); - subscribe(instance()->songChangedNotifier(), [this] { - handleSongChange(); - }); + subscribe(instance()->repeatChangedNotifier(), [this] { + updateRepeatTrackIcon(); + }); + subscribe(instance()->playlistChangedNotifier(), [this] { + handlePlaylistUpdate(); + }); + subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) { + handleSongUpdate(e); + }); + subscribe(instance()->songChangedNotifier(), [this] { handleSongChange(); - if (auto player = audioPlayer()) { - AudioMsgId playing; - auto playbackState = player->currentState(&playing, AudioMsgId::Type::Song); - handleSongUpdate(UpdatedEvent(&playing, &playbackState)); - _playPause->finishTransform(); - } - } + }); + handleSongChange(); + + 
AudioMsgId playing; + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); + handleSongUpdate(UpdatedEvent(&playing, &playbackState)); + _playPause->finishTransform(); } void Widget::updateVolumeToggleIcon() { @@ -201,9 +196,8 @@ void Widget::handleSeekProgress(float64 progress) { if (_seekPositionMs != positionMs) { _seekPositionMs = positionMs; updateTimeLabel(); - if (exists()) { - instance()->startSeeking(); - } + + instance()->startSeeking(); } } @@ -214,14 +208,12 @@ void Widget::handleSeekFinished(float64 progress) { _seekPositionMs = -1; AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing && playbackState.duration) { - audioPlayer()->seek(qRound(progress * playbackState.duration)); + mixer()->seek(qRound(progress * playbackState.duration)); } - if (exists()) { - instance()->stopSeeking(); - } + instance()->stopSeeking(); } void Widget::resizeEvent(QResizeEvent *e) { @@ -260,9 +252,7 @@ void Widget::updateOverLabelsState(QPoint pos) { } void Widget::updateOverLabelsState(bool over) { - if (exists()) { - instance()->playerWidgetOver().notify(over, true); - } + instance()->playerWidgetOver().notify(over, true); } void Widget::updatePlayPrevNextPositions() { @@ -326,7 +316,7 @@ void Widget::handleSongUpdate(const UpdatedEvent &e) { auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing); auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - if (exists() && instance()->isSeeking()) { + if (instance()->isSeeking()) { showPause = true; } auto state = [audio = audioId.audio(), showPause] { @@ -425,16 +415,12 @@ void Widget::createPrevNextButtons() { _previousTrack.create(this, st::mediaPlayerPreviousButton); _previousTrack->show(); 
_previousTrack->setClickedCallback([this]() { - if (exists()) { - instance()->previous(); - } + instance()->previous(); }); _nextTrack.create(this, st::mediaPlayerNextButton); _nextTrack->show(); _nextTrack->setClickedCallback([this]() { - if (exists()) { - instance()->next(); - } + instance()->next(); }); updatePlayPrevNextPositions(); } diff --git a/Telegram/SourceFiles/mediaview.cpp b/Telegram/SourceFiles/mediaview.cpp index bf57a0640b..4660063a4d 100644 --- a/Telegram/SourceFiles/mediaview.cpp +++ b/Telegram/SourceFiles/mediaview.cpp @@ -228,9 +228,7 @@ void MediaView::stopGif() { _videoPaused = _videoStopped = _videoIsSilent = false; _fullScreenVideo = false; _clipController.destroy(); - if (audioPlayer()) { - disconnect(audioPlayer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&))); - } + disconnect(Media::Player::mixer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&))); } void MediaView::documentUpdated(DocumentData *doc) { @@ -1487,9 +1485,7 @@ void MediaView::createClipController() { connect(_clipController, SIGNAL(toFullScreenPressed()), this, SLOT(onVideoToggleFullScreen())); connect(_clipController, SIGNAL(fromFullScreenPressed()), this, SLOT(onVideoToggleFullScreen())); - if (audioPlayer()) { - connect(audioPlayer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&))); - } + connect(Media::Player::mixer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&))); } void MediaView::setClipControllerGeometry() { @@ -1585,8 +1581,7 @@ void MediaView::onVideoPlayProgress(const AudioMsgId &audioId) { return; } - t_assert(audioPlayer() != nullptr); - auto state = audioPlayer()->currentVideoState(_gif->playId()); + auto state = Media::Player::mixer()->currentVideoState(_gif->playId()); if (state.duration) { updateVideoPlaybackState(state); } diff --git a/Telegram/SourceFiles/overview/overview_layout.cpp 
b/Telegram/SourceFiles/overview/overview_layout.cpp index 7b92d5cace..795696ec61 100644 --- a/Telegram/SourceFiles/overview/overview_layout.cpp +++ b/Telegram/SourceFiles/overview/overview_layout.cpp @@ -649,14 +649,12 @@ bool Voice::updateStatusText() { statusSize = FileStatusSizeFailed; } else if (_data->loaded()) { statusSize = FileStatusSizeLoaded; - if (audioPlayer()) { - AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice); - if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - } + AudioMsgId playing; + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); + if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); + realDuration = playbackState.duration / (playbackState.frequency ? 
playbackState.frequency : AudioVoiceMsgFrequency); + showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); } } else { statusSize = FileStatusSizeReady; @@ -935,17 +933,15 @@ bool Document::updateStatusText() { } else if (_data->loaded()) { if (_data->song()) { statusSize = FileStatusSizeLoaded; - if (audioPlayer()) { - AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - } - if (!showPause && (playing == AudioMsgId(_data, _parent->fullId())) && Media::Player::exists() && Media::Player::instance()->isSeeking()) { - showPause = true; - } + AudioMsgId playing; + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); + if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); + realDuration = playbackState.duration / (playbackState.frequency ? 
playbackState.frequency : AudioVoiceMsgFrequency); + showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + } + if (!showPause && (playing == AudioMsgId(_data, _parent->fullId())) && Media::Player::instance()->isSeeking()) { + showPause = true; } } else { statusSize = FileStatusSizeLoaded; diff --git a/Telegram/SourceFiles/overviewwidget.cpp b/Telegram/SourceFiles/overviewwidget.cpp index 3989e997b6..3e46ab9647 100644 --- a/Telegram/SourceFiles/overviewwidget.cpp +++ b/Telegram/SourceFiles/overviewwidget.cpp @@ -2091,9 +2091,9 @@ int32 OverviewWidget::lastScrollTop() const { } int32 OverviewWidget::countBestScroll() const { - if (type() == OverviewMusicFiles && audioPlayer()) { + if (type() == OverviewMusicFiles) { AudioMsgId playing; - audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing) { int32 top = _inner->itemTop(playing.contextId()); if (top >= 0) { diff --git a/Telegram/SourceFiles/settings.cpp b/Telegram/SourceFiles/settings.cpp index 25eb6e7b7e..90f82748ce 100644 --- a/Telegram/SourceFiles/settings.cpp +++ b/Telegram/SourceFiles/settings.cpp @@ -74,9 +74,6 @@ bool gCompressPastedImage = true; QString gTimeFormat = qsl("hh:mm"); -bool gHasAudioPlayer = true; -bool gHasAudioCapture = true; - RecentEmojiPack gRecentEmojis; RecentEmojisPreload gRecentEmojisPreload; EmojiColorVariants gEmojiVariants; diff --git a/Telegram/SourceFiles/settings.h b/Telegram/SourceFiles/settings.h index bcc3ee0dd1..2801d48b1d 100644 --- a/Telegram/SourceFiles/settings.h +++ b/Telegram/SourceFiles/settings.h @@ -120,9 +120,6 @@ DeclareSetting(DBIScale, ConfigScale); DeclareSetting(bool, CompressPastedImage); DeclareSetting(QString, TimeFormat); -DeclareSetting(bool, HasAudioPlayer); -DeclareSetting(bool, HasAudioCapture); - inline void cChangeTimeFormat(const QString &newFormat) { if 
(!newFormat.isEmpty()) cSetTimeFormat(newFormat); } diff --git a/Telegram/SourceFiles/shortcuts.cpp b/Telegram/SourceFiles/shortcuts.cpp index ba1f617f14..7d47654cf8 100644 --- a/Telegram/SourceFiles/shortcuts.cpp +++ b/Telegram/SourceFiles/shortcuts.cpp @@ -79,51 +79,33 @@ bool quit_telegram() { //} bool media_play() { - if (Media::Player::exists()) { - Media::Player::instance()->play(); - return true; - } - return false; + Media::Player::instance()->play(); + return true; } bool media_pause() { - if (Media::Player::exists()) { - Media::Player::instance()->pause(); - return true; - } - return false; + Media::Player::instance()->pause(); + return true; } bool media_playpause() { - if (Media::Player::exists()) { - Media::Player::instance()->playPause(); - return true; - } - return false; + Media::Player::instance()->playPause(); + return true; } bool media_stop() { - if (Media::Player::exists()) { - Media::Player::instance()->stop(); - return true; - } - return false; + Media::Player::instance()->stop(); + return true; } bool media_previous() { - if (Media::Player::exists()) { - Media::Player::instance()->previous(); - return true; - } - return false; + Media::Player::instance()->previous(); + return true; } bool media_next() { - if (Media::Player::exists()) { - Media::Player::instance()->next(); - return true; - } - return false; + Media::Player::instance()->next(); + return true; } bool search() { diff --git a/Telegram/SourceFiles/structs.cpp b/Telegram/SourceFiles/structs.cpp index 58071d0e3e..22eed9aa8b 100644 --- a/Telegram/SourceFiles/structs.cpp +++ b/Telegram/SourceFiles/structs.cpp @@ -1161,9 +1161,9 @@ void DocumentOpenClickHandler::doOpen(DocumentData *data, HistoryItem *context, if (!data->date) return; auto msgId = context ? 
context->fullId() : FullMsgId(); - bool playVoice = data->voice() && audioPlayer(); - bool playMusic = data->song() && audioPlayer(); - bool playVideo = data->isVideo() && audioPlayer(); + bool playVoice = data->voice(); + bool playMusic = data->song(); + bool playVideo = data->isVideo(); bool playAnimation = data->isAnimation(); auto &location = data->location(true); if (auto applyTheme = data->isTheme()) { @@ -1176,26 +1176,26 @@ void DocumentOpenClickHandler::doOpen(DocumentData *data, HistoryItem *context, if (!location.isEmpty() || (!data->data().isEmpty() && (playVoice || playMusic || playVideo || playAnimation))) { if (playVoice) { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice); + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); if (playing == AudioMsgId(data, msgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - audioPlayer()->pauseresume(AudioMsgId::Type::Voice); + Media::Player::mixer()->pauseresume(AudioMsgId::Type::Voice); } else { - AudioMsgId audio(data, msgId); - audioPlayer()->play(audio); - audioPlayer()->notify(audio); + auto audio = AudioMsgId(data, msgId); + Media::Player::mixer()->play(audio); + Media::Player::Updated().notify(audio); if (App::main()) { App::main()->mediaMarkRead(data); } } } else if (playMusic) { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing == AudioMsgId(data, msgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - audioPlayer()->pauseresume(AudioMsgId::Type::Song); + Media::Player::mixer()->pauseresume(AudioMsgId::Type::Song); } else { - AudioMsgId song(data, msgId); - audioPlayer()->play(song); - audioPlayer()->notify(song); + auto song = 
AudioMsgId(data, msgId); + Media::Player::mixer()->play(song); + Media::Player::Updated().notify(song); } } else if (playVideo) { if (!data->data().isEmpty()) { @@ -1470,8 +1470,8 @@ void DocumentData::performActionOnLoad() { auto already = loc.name(); auto item = _actionOnLoadMsgId.msg ? App::histItemById(_actionOnLoadMsgId) : nullptr; bool showImage = !isVideo() && (size < App::kImageSizeLimit); - bool playVoice = voice() && audioPlayer() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen); - bool playMusic = song() && audioPlayer() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen); + bool playVoice = voice() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen); + bool playMusic = song() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen); bool playAnimation = isAnimation() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen) && showImage && item && item->getMedia(); if (auto applyTheme = isTheme()) { if (!loc.isEmpty() && loc.accessEnable()) { @@ -1483,24 +1483,24 @@ void DocumentData::performActionOnLoad() { if (playVoice) { if (loaded()) { AudioMsgId playing; - auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice); + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); if (playing == AudioMsgId(this, _actionOnLoadMsgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - audioPlayer()->pauseresume(AudioMsgId::Type::Voice); + Media::Player::mixer()->pauseresume(AudioMsgId::Type::Voice); } else if (playbackState.state & AudioPlayerStoppedMask) { - audioPlayer()->play(AudioMsgId(this, _actionOnLoadMsgId)); + Media::Player::mixer()->play(AudioMsgId(this, _actionOnLoadMsgId)); if (App::main()) App::main()->mediaMarkRead(this); } } } else if (playMusic) { if (loaded()) { AudioMsgId playing; - auto 
playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song); + auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); if (playing == AudioMsgId(this, _actionOnLoadMsgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - audioPlayer()->pauseresume(AudioMsgId::Type::Song); + Media::Player::mixer()->pauseresume(AudioMsgId::Type::Song); } else if (playbackState.state & AudioPlayerStoppedMask) { AudioMsgId song(this, _actionOnLoadMsgId); - audioPlayer()->play(song); - audioPlayer()->notify(song); + Media::Player::mixer()->play(song); + Media::Player::Updated().notify(song); } } } else if (playAnimation) { diff --git a/Telegram/gyp/Telegram.gyp b/Telegram/gyp/Telegram.gyp index 2503f1e1ba..60d54c0a3d 100644 --- a/Telegram/gyp/Telegram.gyp +++ b/Telegram/gyp/Telegram.gyp @@ -301,6 +301,8 @@ '<(src_loc)/media/view/media_clip_volume_controller.h', '<(src_loc)/media/media_audio.cpp', '<(src_loc)/media/media_audio.h', + '<(src_loc)/media/media_audio_capture.cpp', + '<(src_loc)/media/media_audio_capture.h', '<(src_loc)/media/media_audio_ffmpeg_loader.cpp', '<(src_loc)/media/media_audio_ffmpeg_loader.h', '<(src_loc)/media/media_audio_loader.cpp',