Audio capture moved to a separate module.

John Preston 2017-01-19 11:24:43 +03:00
parent 28899a642b
commit 6ae68b337d
25 changed files with 1516 additions and 1429 deletions
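At a glance, the refactoring replaces the nullable free functions audioPlayer() and audioCapture() with module-level accessors: Media::Player::mixer() for playback and Media::Capture::instance() for voice recording. Both are created unconditionally during startup (see the media_player_instance.cpp hunks near the end of this diff), so the Media::Player::exists() and null-pointer guards disappear from call sites. A minimal before/after sketch of the call-site pattern, assembled from the hunks below; the surrounding context is illustrative only.

// Before: the globals could be null, so every call site guarded them.
if (audioPlayer()) {
	audioPlayer()->stopAndClear();
}
if (audioCapture()) {
	connect(audioCapture(), SIGNAL(error()), this, SLOT(onRecordError()));
}

// After: mixer() and Capture::instance() are assumed valid once Media::Player::start() has run.
Media::Player::mixer()->stopAndClear();
connect(Media::Capture::instance(), SIGNAL(error()), this, SLOT(onRecordError()));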

View File

@ -195,9 +195,7 @@ namespace {
Global::SetLocalPasscode(false);
Global::RefLocalPasscodeChanged().notify();
}
if (audioPlayer()) {
audioPlayer()->stopAndClear();
}
Media::Player::mixer()->stopAndClear();
if (auto w = wnd()) {
w->tempDirDelete(Local::ClearManagerAll);
w->notifyClearFast();
@ -2436,7 +2434,7 @@ namespace {
void playSound() {
if (Global::SoundNotify() && !Platform::Notifications::skipAudio()) {
audioPlayNotify();
Media::Player::PlayNotify();
}
}

View File

@ -1437,47 +1437,45 @@ bool HistoryDocument::updateStatusText() const {
statusSize = _data->loadOffset();
} else if (_data->loaded()) {
statusSize = FileStatusSizeLoaded;
if (audioPlayer()) {
if (_data->voice()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
if (auto voice = Get<HistoryDocumentVoice>()) {
bool was = voice->_playback;
voice->ensurePlayback(this);
if (!was || playbackState.position != voice->_playback->_position) {
float64 prg = playbackState.duration ? snap(float64(playbackState.position) / playbackState.duration, 0., 1.) : 0.;
if (voice->_playback->_position < playbackState.position) {
voice->_playback->a_progress.start(prg);
} else {
voice->_playback->a_progress = anim::value(0., prg);
}
voice->_playback->_position = playbackState.position;
voice->_playback->_a_progress.start();
if (_data->voice()) {
AudioMsgId playing;
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
if (auto voice = Get<HistoryDocumentVoice>()) {
bool was = voice->_playback;
voice->ensurePlayback(this);
if (!was || playbackState.position != voice->_playback->_position) {
float64 prg = playbackState.duration ? snap(float64(playbackState.position) / playbackState.duration, 0., 1.) : 0.;
if (voice->_playback->_position < playbackState.position) {
voice->_playback->a_progress.start(prg);
} else {
voice->_playback->a_progress = anim::value(0., prg);
}
voice->_playback->_position = playbackState.position;
voice->_playback->_a_progress.start();
}
}
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
} else {
if (auto voice = Get<HistoryDocumentVoice>()) {
voice->checkPlaybackFinished();
}
}
} else if (_data->song()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
} else {
}
if (!showPause && (playing == AudioMsgId(_data, _parent->fullId()))) {
showPause = (Media::Player::exists() && Media::Player::instance()->isSeeking());
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
} else {
if (auto voice = Get<HistoryDocumentVoice>()) {
voice->checkPlaybackFinished();
}
}
} else if (_data->song()) {
AudioMsgId playing;
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
} else {
}
if (!showPause && (playing == AudioMsgId(_data, _parent->fullId()))) {
showPause = Media::Player::instance()->isSeeking();
}
}
} else {
statusSize = FileStatusSizeReady;

View File

@ -52,6 +52,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
#include "mainwindow.h"
#include "fileuploader.h"
#include "media/media_audio.h"
#include "media/media_audio_capture.h"
#include "localstorage.h"
#include "apiwrap.h"
#include "window/top_bar_widget.h"
@ -3115,11 +3116,9 @@ HistoryWidget::HistoryWidget(QWidget *parent) : TWidget(parent)
connect(_emojiPan, SIGNAL(updateStickers()), this, SLOT(updateStickers()));
connect(&_sendActionStopTimer, SIGNAL(timeout()), this, SLOT(onCancelSendAction()));
connect(&_previewTimer, SIGNAL(timeout()), this, SLOT(onPreviewTimeout()));
if (audioCapture()) {
connect(audioCapture(), SIGNAL(error()), this, SLOT(onRecordError()));
connect(audioCapture(), SIGNAL(updated(quint16,qint32)), this, SLOT(onRecordUpdate(quint16,qint32)));
connect(audioCapture(), SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SLOT(onRecordDone(QByteArray,VoiceWaveform,qint32)));
}
connect(Media::Capture::instance(), SIGNAL(error()), this, SLOT(onRecordError()));
connect(Media::Capture::instance(), SIGNAL(updated(quint16,qint32)), this, SLOT(onRecordUpdate(quint16,qint32)));
connect(Media::Capture::instance(), SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SLOT(onRecordDone(QByteArray,VoiceWaveform,qint32)));
_attachToggle->setClickedCallback(App::LambdaDelayed(st::historyAttach.ripple.hideDuration, this, [this] {
chooseAttach();
@ -5649,10 +5648,10 @@ void HistoryWidget::leaveToChildEvent(QEvent *e, QWidget *child) { // e -- from
}
void HistoryWidget::recordStartCallback() {
if (!cHasAudioCapture()) {
if (!Media::Capture::instance()->available()) {
return;
}
emit audioCapture()->start();
emit Media::Capture::instance()->start();
_recording = _inField = true;
updateControlsVisibility();
@ -5680,13 +5679,13 @@ void HistoryWidget::mouseReleaseEvent(QMouseEvent *e) {
_attachDrag = DragStateNone;
updateDragAreas();
}
if (_recording && cHasAudioCapture()) {
if (_recording) {
stopRecording(_peer && _inField);
}
}
void HistoryWidget::stopRecording(bool send) {
emit audioCapture()->stop(send);
emit Media::Capture::instance()->stop(send);
a_recordingLevel = anim::value();
_a_recording.stop();
@ -6045,7 +6044,7 @@ bool HistoryWidget::isMuteUnmute() const {
}
bool HistoryWidget::showRecordButton() const {
return cHasAudioCapture() && !_field->hasSendText() && !readyToForward() && !_editMsgId;
return Media::Capture::instance()->available() && !_field->hasSendText() && !readyToForward() && !_editMsgId;
}
bool HistoryWidget::showInlineBotCancel() const {

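In HistoryWidget the recording path now talks to the capture module directly: the signal connections are made unconditionally, and availability is checked via Media::Capture::instance()->available() rather than the cHasAudioCapture() flag. A condensed sketch of the resulting flow, assuming the slot names shown in the hunks above:

// Constructor wiring: capture signals drive the existing record slots.
connect(Media::Capture::instance(), SIGNAL(updated(quint16,qint32)), this, SLOT(onRecordUpdate(quint16,qint32)));
connect(Media::Capture::instance(), SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SLOT(onRecordDone(QByteArray,VoiceWaveform,qint32)));

// Start/stop: availability gates the start; stop(send) decides whether done() delivers a result.
void HistoryWidget::recordStartCallback() {
	if (!Media::Capture::instance()->available()) {
		return;
	}
	emit Media::Capture::instance()->start();
	// ...
}

void HistoryWidget::stopRecording(bool send) {
	emit Media::Capture::instance()->stop(send);
	// ...
}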
View File

@ -832,28 +832,24 @@ bool File::updateStatusText() const {
} else if (document->loaded()) {
if (document->voice()) {
statusSize = FileStatusSizeLoaded;
if (audioPlayer()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
AudioMsgId playing;
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
} else if (document->song()) {
statusSize = FileStatusSizeLoaded;
if (audioPlayer()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
if (!showPause && (playing == AudioMsgId(document, FullMsgId())) && Media::Player::exists() && Media::Player::instance()->isSeeking()) {
showPause = true;
}
AudioMsgId playing;
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
if (!showPause && (playing == AudioMsgId(document, FullMsgId())) && Media::Player::instance()->isSeeking()) {
showPause = true;
}
} else {
statusSize = FileStatusSizeLoaded;

View File

@ -100,13 +100,11 @@ MainWidget::MainWidget(QWidget *parent) : TWidget(parent)
connect(_topBar, SIGNAL(clicked()), this, SLOT(onTopBarClick()));
connect(_history, SIGNAL(historyShown(History*,MsgId)), this, SLOT(onHistoryShown(History*,MsgId)));
connect(&updateNotifySettingTimer, SIGNAL(timeout()), this, SLOT(onUpdateNotifySettings()));
if (auto player = audioPlayer()) {
subscribe(player, [this](const AudioMsgId &audioId) {
if (audioId.type() != AudioMsgId::Type::Video) {
handleAudioUpdate(audioId);
}
});
}
subscribe(Media::Player::Updated(), [this](const AudioMsgId &audioId) {
if (audioId.type() != AudioMsgId::Type::Video) {
handleAudioUpdate(audioId);
}
});
subscribe(Global::RefDialogsListFocused(), [this](bool) {
updateDialogsWidthAnimated();
@ -133,30 +131,28 @@ MainWidget::MainWidget(QWidget *parent) : TWidget(parent)
});
connect(&_cacheBackgroundTimer, SIGNAL(timeout()), this, SLOT(onCacheBackground()));
if (Media::Player::exists()) {
_playerPanel->setPinCallback([this] { switchToFixedPlayer(); });
_playerPanel->setCloseCallback([this] { closeBothPlayers(); });
subscribe(Media::Player::instance()->titleButtonOver(), [this](bool over) {
if (over) {
_playerPanel->showFromOther();
} else {
_playerPanel->hideFromOther();
_playerPanel->setPinCallback([this] { switchToFixedPlayer(); });
_playerPanel->setCloseCallback([this] { closeBothPlayers(); });
subscribe(Media::Player::instance()->titleButtonOver(), [this](bool over) {
if (over) {
_playerPanel->showFromOther();
} else {
_playerPanel->hideFromOther();
}
});
subscribe(Media::Player::instance()->playerWidgetOver(), [this](bool over) {
if (over) {
if (_playerPlaylist->isHidden()) {
auto position = mapFromGlobal(QCursor::pos()).x();
auto bestPosition = _playerPlaylist->bestPositionFor(position);
if (rtl()) bestPosition = position + 2 * (position - bestPosition) - _playerPlaylist->width();
updateMediaPlaylistPosition(bestPosition);
}
});
subscribe(Media::Player::instance()->playerWidgetOver(), [this](bool over) {
if (over) {
if (_playerPlaylist->isHidden()) {
auto position = mapFromGlobal(QCursor::pos()).x();
auto bestPosition = _playerPlaylist->bestPositionFor(position);
if (rtl()) bestPosition = position + 2 * (position - bestPosition) - _playerPlaylist->width();
updateMediaPlaylistPosition(bestPosition);
}
_playerPlaylist->showFromOther();
} else {
_playerPlaylist->hideFromOther();
}
});
}
_playerPlaylist->showFromOther();
} else {
_playerPlaylist->hideFromOther();
}
});
subscribe(Adaptive::Changed(), [this]() { handleAdaptiveLayoutUpdate(); });
@ -1568,10 +1564,10 @@ void MainWidget::ui_autoplayMediaInlineAsync(qint32 channelId, qint32 msgId) {
void MainWidget::handleAudioUpdate(const AudioMsgId &audioId) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, audioId.type());
auto playbackState = Media::Player::mixer()->currentState(&playing, audioId.type());
if (playing == audioId && playbackState.state == AudioPlayerStoppedAtStart) {
playbackState.state = AudioPlayerStopped;
audioPlayer()->clearStoppedAtStart(audioId);
Media::Player::mixer()->clearStoppedAtStart(audioId);
auto document = audioId.audio();
auto filepath = document->filepath(DocumentData::FilePathResolveSaveFromData);
@ -1584,7 +1580,7 @@ void MainWidget::handleAudioUpdate(const AudioMsgId &audioId) {
if (playing == audioId && audioId.type() == AudioMsgId::Type::Song) {
if (!(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
if (!_playerUsingPanel && !_player && Media::Player::exists()) {
if (!_playerUsingPanel && !_player) {
createPlayer();
}
} else if (_player && _player->isHidden() && !_playerUsingPanel) {
@ -1642,15 +1638,10 @@ void MainWidget::closeBothPlayers() {
}
_playerVolume.destroyDelayed();
if (Media::Player::exists()) {
Media::Player::instance()->usePanelPlayer().notify(false, true);
}
Media::Player::instance()->usePanelPlayer().notify(false, true);
_playerPanel->hideIgnoringEnterEvents();
_playerPlaylist->hideIgnoringEnterEvents();
if (Media::Player::exists()) {
Media::Player::instance()->stop();
}
Media::Player::instance()->stop();
Shortcuts::disableMediaShortcuts();
}
@ -1685,7 +1676,7 @@ void MainWidget::playerHeightUpdated() {
}
if (!_playerHeight && _player->isHidden()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing && (playbackState.state & AudioPlayerStoppedMask)) {
_playerVolume.destroyDelayed();
_player.destroyDelayed();
@ -1714,9 +1705,7 @@ void MainWidget::documentLoadProgress(DocumentData *document) {
App::wnd()->documentUpdated(document);
if (!document->loaded() && document->song()) {
if (Media::Player::exists()) {
Media::Player::instance()->documentLoadProgress(document);
}
Media::Player::instance()->documentLoadProgress(document);
}
}

File diff suppressed because it is too large.

View File

@ -20,13 +20,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#pragma once
#include "core/basic_types.h"
void audioInit();
bool audioWorks();
void audioPlayNotify();
void audioFinish();
enum AudioPlayerState {
AudioPlayerStopped = 0x01,
AudioPlayerStoppedAtEnd = 0x02,
@ -43,9 +36,6 @@ enum AudioPlayerState {
AudioPlayerResuming = 0x38,
};
class AudioPlayerFader;
class AudioPlayerLoaders;
struct VideoSoundData;
struct VideoSoundPart;
struct AudioPlaybackState {
@ -55,11 +45,25 @@ struct AudioPlaybackState {
int32 frequency = 0;
};
class AudioPlayer : public QObject, public base::Observable<AudioMsgId>, private base::Subscriber {
namespace Media {
namespace Player {
void InitAudio();
void DeInitAudio();
base::Observable<AudioMsgId> &Updated();
bool CreateAudioPlaybackDevice();
void PlayNotify();
class Fader;
class Loaders;
class Mixer : public QObject, private base::Subscriber {
Q_OBJECT
public:
AudioPlayer();
Mixer();
void play(const AudioMsgId &audio, int64 position = 0);
void pauseresume(AudioMsgId::Type type, bool fast = false);
@ -81,11 +85,9 @@ public:
void clearStoppedAtStart(const AudioMsgId &audio);
void resumeDevice();
~Mixer();
~AudioPlayer();
private slots:
private slots:
void onError(const AudioMsgId &audio);
void onStopped(const AudioMsgId &audio);
@ -161,60 +163,22 @@ private:
QMutex _mutex;
friend class AudioPlayerFader;
friend class AudioPlayerLoaders;
friend class Fader;
friend class Loaders;
QThread _faderThread, _loaderThread;
AudioPlayerFader *_fader;
AudioPlayerLoaders *_loader;
Fader *_fader;
Loaders *_loader;
};
namespace internal {
Mixer *mixer();
QMutex *audioPlayerMutex();
float64 audioSuppressGain();
float64 audioSuppressSongGain();
bool audioCheckError();
} // namespace internal
class AudioCaptureInner;
class AudioCapture : public QObject {
class Fader : public QObject {
Q_OBJECT
public:
AudioCapture();
bool check();
~AudioCapture();
signals:
void start();
void stop(bool needResult);
void done(QByteArray data, VoiceWaveform waveform, qint32 samples);
void updated(quint16 level, qint32 samples);
void error();
private:
friend class AudioCaptureInner;
QThread _captureThread;
AudioCaptureInner *_capture;
};
AudioPlayer *audioPlayer();
AudioCapture *audioCapture();
class AudioPlayerFader : public QObject {
Q_OBJECT
public:
AudioPlayerFader(QThread *thread);
Fader(QThread *thread);
void resumeDevice();
signals:
@ -244,8 +208,8 @@ private:
EmitPositionUpdated = 0x04,
EmitNeedToPreload = 0x08,
};
int32 updateOnePlayback(AudioPlayer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged);
void setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped);
int32 updateOnePlayback(Mixer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged);
void setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped);
QTimer _timer, _pauseTimer;
QMutex _pauseMutex;
@ -263,42 +227,17 @@ private:
};
struct AudioCapturePrivate;
struct AVFrame;
} // namespace Player
} // namespace Media
class AudioCaptureInner : public QObject {
Q_OBJECT
namespace internal {
public:
AudioCaptureInner(QThread *thread);
~AudioCaptureInner();
QMutex *audioPlayerMutex();
float64 audioSuppressGain();
float64 audioSuppressSongGain();
bool audioCheckError();
signals:
void error();
void updated(quint16 level, qint32 samples);
void done(QByteArray data, VoiceWaveform waveform, qint32 samples);
public slots:
void onInit();
void onStart();
void onStop(bool needResult);
void onTimeout();
private:
void processFrame(int32 offset, int32 framesize);
void writeFrame(AVFrame *frame);
// Writes the packets till EAGAIN is got from av_receive_packet()
// Returns number of packets written or -1 on error
int writePackets();
AudioCapturePrivate *d;
QTimer _timer;
QByteArray _captured;
};
} // namespace internal
MTPDocumentAttribute audioReadSongAttributes(const QString &fname, const QByteArray &data, QImage &cover, QByteArray &coverBytes, QByteArray &coverFormat);
VoiceWaveform audioCountWaveform(const FileLocation &file, const QByteArray &data);
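Summarizing the header changes above: the old top-level AudioPlayer/AudioCapture API is split in two. Playback moves into namespace Media::Player as the Mixer class plus free functions (InitAudio, DeInitAudio, Updated, PlayNotify, mixer), while capture moves out to the new media_audio_capture.h further down. A rough sketch of the reorganized playback API in use, based only on these declarations; audioId and handler are placeholders and the call ordering is an assumption.

Media::Player::InitAudio();                    // was audioInit()
Media::Player::mixer()->play(audioId);         // Mixer replaces the old AudioPlayer class
AudioMsgId playing;
auto state = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
subscribe(Media::Player::Updated(), handler);  // from a base::Subscriber; replaces subscribing to audioPlayer() itself
Media::Player::PlayNotify();                   // was audioPlayNotify()
Media::Player::DeInitAudio();                  // was audioFinish()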

View File

@ -0,0 +1,700 @@
/*
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org
Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#include "stdafx.h"
#include "media/media_audio_capture.h"
#include "media/media_audio_ffmpeg_loader.h"
#include <AL/al.h>
#include <AL/alc.h>
#define AL_ALEXT_PROTOTYPES
#include <AL/alext.h>
namespace Media {
namespace Capture {
namespace {
Instance *CaptureInstance = nullptr;
bool ErrorHappened(ALCdevice *device) {
ALenum errCode;
if ((errCode = alcGetError(device)) != ALC_NO_ERROR) {
LOG(("Audio Capture Error: %1, %2").arg(errCode).arg((const char *)alcGetString(device, errCode)));
return true;
}
return false;
}
} // namespace
void Init() {
t_assert(CaptureInstance == nullptr);
CaptureInstance = new Instance();
instance()->check();
}
void DeInit() {
delete base::take(CaptureInstance);
}
Instance::Instance() : _inner(new Inner(&_thread)) {
CaptureInstance = this;
connect(this, SIGNAL(start()), _inner, SLOT(onStart()));
connect(this, SIGNAL(stop(bool)), _inner, SLOT(onStop(bool)));
connect(_inner, SIGNAL(done(QByteArray, VoiceWaveform, qint32)), this, SIGNAL(done(QByteArray, VoiceWaveform, qint32)));
connect(_inner, SIGNAL(updated(quint16, qint32)), this, SIGNAL(updated(quint16, qint32)));
connect(_inner, SIGNAL(error()), this, SIGNAL(error()));
connect(&_thread, SIGNAL(started()), _inner, SLOT(onInit()));
connect(&_thread, SIGNAL(finished()), _inner, SLOT(deleteLater()));
_thread.start();
}
void Instance::check() {
_available = false;
if (auto defaultDevice = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {
if (auto device = alcCaptureOpenDevice(defaultDevice, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5)) {
auto error = ErrorHappened(device);
alcCaptureCloseDevice(device);
_available = !error;
}
}
}
Instance::~Instance() {
_inner = nullptr;
_thread.quit();
_thread.wait();
}
Instance *instance() {
return CaptureInstance;
}
struct Instance::Inner::Private {
ALCdevice *device = nullptr;
AVOutputFormat *fmt = nullptr;
uchar *ioBuffer = nullptr;
AVIOContext *ioContext = nullptr;
AVFormatContext *fmtContext = nullptr;
AVStream *stream = nullptr;
AVCodec *codec = nullptr;
AVCodecContext *codecContext = nullptr;
bool opened = false;
int srcSamples = 0;
int dstSamples = 0;
int maxDstSamples = 0;
int dstSamplesSize = 0;
int fullSamples = 0;
uint8_t **srcSamplesData = nullptr;
uint8_t **dstSamplesData = nullptr;
SwrContext *swrContext = nullptr;
int32 lastUpdate = 0;
uint16 levelMax = 0;
QByteArray data;
int32 dataPos = 0;
int64 waveformMod = 0;
int64 waveformEach = (AudioVoiceMsgFrequency / 100);
uint16 waveformPeak = 0;
QVector<uchar> waveform;
static int _read_data(void *opaque, uint8_t *buf, int buf_size) {
auto l = reinterpret_cast<Private*>(opaque);
int32 nbytes = qMin(l->data.size() - l->dataPos, int32(buf_size));
if (nbytes <= 0) {
return 0;
}
memcpy(buf, l->data.constData() + l->dataPos, nbytes);
l->dataPos += nbytes;
return nbytes;
}
static int _write_data(void *opaque, uint8_t *buf, int buf_size) {
auto l = reinterpret_cast<Private*>(opaque);
if (buf_size <= 0) return 0;
if (l->dataPos + buf_size > l->data.size()) l->data.resize(l->dataPos + buf_size);
memcpy(l->data.data() + l->dataPos, buf, buf_size);
l->dataPos += buf_size;
return buf_size;
}
static int64_t _seek_data(void *opaque, int64_t offset, int whence) {
auto l = reinterpret_cast<Private*>(opaque);
int32 newPos = -1;
switch (whence) {
case SEEK_SET: newPos = offset; break;
case SEEK_CUR: newPos = l->dataPos + offset; break;
case SEEK_END: newPos = l->data.size() + offset; break;
}
if (newPos < 0) {
return -1;
}
l->dataPos = newPos;
return l->dataPos;
}
};
Instance::Inner::Inner(QThread *thread) : d(new Private()) {
moveToThread(thread);
_timer.moveToThread(thread);
connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimeout()));
}
Instance::Inner::~Inner() {
onStop(false);
delete d;
}
void Instance::Inner::onInit() {
}
void Instance::Inner::onStart() {
// Start OpenAL Capture
const ALCchar *dName = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER);
DEBUG_LOG(("Audio Info: Capture device name '%1'").arg(dName));
d->device = alcCaptureOpenDevice(dName, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5);
if (!d->device) {
LOG(("Audio Error: capture device not present!"));
emit error();
return;
}
alcCaptureStart(d->device);
if (ErrorHappened(d->device)) {
alcCaptureCloseDevice(d->device);
d->device = nullptr;
emit error();
return;
}
// Create encoding context
d->ioBuffer = (uchar*)av_malloc(AVBlockSize);
d->ioContext = avio_alloc_context(d->ioBuffer, AVBlockSize, 1, static_cast<void*>(d), &Private::_read_data, &Private::_write_data, &Private::_seek_data);
int res = 0;
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
AVOutputFormat *fmt = 0;
while ((fmt = av_oformat_next(fmt))) {
if (fmt->name == qstr("opus")) {
break;
}
}
if (!fmt) {
LOG(("Audio Error: Unable to find opus AVOutputFormat for capture"));
onStop(false);
emit error();
return;
}
if ((res = avformat_alloc_output_context2(&d->fmtContext, fmt, 0, 0)) < 0) {
LOG(("Audio Error: Unable to avformat_alloc_output_context2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
d->fmtContext->pb = d->ioContext;
d->fmtContext->flags |= AVFMT_FLAG_CUSTOM_IO;
d->opened = true;
// Add audio stream
d->codec = avcodec_find_encoder(fmt->audio_codec);
if (!d->codec) {
LOG(("Audio Error: Unable to avcodec_find_encoder for capture"));
onStop(false);
emit error();
return;
}
d->stream = avformat_new_stream(d->fmtContext, d->codec);
if (!d->stream) {
LOG(("Audio Error: Unable to avformat_new_stream for capture"));
onStop(false);
emit error();
return;
}
d->stream->id = d->fmtContext->nb_streams - 1;
d->codecContext = avcodec_alloc_context3(d->codec);
if (!d->codecContext) {
LOG(("Audio Error: Unable to avcodec_alloc_context3 for capture"));
onStop(false);
emit error();
return;
}
av_opt_set_int(d->codecContext, "refcounted_frames", 1, 0);
d->codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
d->codecContext->bit_rate = 64000;
d->codecContext->channel_layout = AV_CH_LAYOUT_MONO;
d->codecContext->sample_rate = AudioVoiceMsgFrequency;
d->codecContext->channels = 1;
if (d->fmtContext->oformat->flags & AVFMT_GLOBALHEADER) {
d->codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
// Open audio stream
if ((res = avcodec_open2(d->codecContext, d->codec, nullptr)) < 0) {
LOG(("Audio Error: Unable to avcodec_open2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
// Alloc source samples
d->srcSamples = (d->codecContext->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) ? 10000 : d->codecContext->frame_size;
//if ((res = av_samples_alloc_array_and_samples(&d->srcSamplesData, 0, d->codecContext->channels, d->srcSamples, d->codecContext->sample_fmt, 0)) < 0) {
// LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
// onStop(false);
// emit error();
// return;
//}
// Using _captured directly
// Prepare resampling
d->swrContext = swr_alloc();
if (!d->swrContext) {
fprintf(stderr, "Could not allocate resampler context\n");
exit(1);
}
av_opt_set_int(d->swrContext, "in_channel_count", d->codecContext->channels, 0);
av_opt_set_int(d->swrContext, "in_sample_rate", d->codecContext->sample_rate, 0);
av_opt_set_sample_fmt(d->swrContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
av_opt_set_int(d->swrContext, "out_channel_count", d->codecContext->channels, 0);
av_opt_set_int(d->swrContext, "out_sample_rate", d->codecContext->sample_rate, 0);
av_opt_set_sample_fmt(d->swrContext, "out_sample_fmt", d->codecContext->sample_fmt, 0);
if ((res = swr_init(d->swrContext)) < 0) {
LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
d->maxDstSamples = d->srcSamples;
if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) {
LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) {
LOG(("Audio Error: Unable to avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
// Write file header
if ((res = avformat_write_header(d->fmtContext, 0)) < 0) {
LOG(("Audio Error: Unable to avformat_write_header for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
_timer.start(50);
_captured.clear();
_captured.reserve(AudioVoiceMsgBufferSize);
DEBUG_LOG(("Audio Capture: started!"));
}
void Instance::Inner::onStop(bool needResult) {
if (!_timer.isActive()) return; // in onStop() already
_timer.stop();
if (d->device) {
alcCaptureStop(d->device);
onTimeout(); // get last data
}
// Write what is left
if (!_captured.isEmpty()) {
int32 fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000, capturedSamples = _captured.size() / sizeof(short);
if ((_captured.size() % sizeof(short)) || (d->fullSamples + capturedSamples < AudioVoiceMsgFrequency) || (capturedSamples < fadeSamples)) {
d->fullSamples = 0;
d->dataPos = 0;
d->data.clear();
d->waveformMod = 0;
d->waveformPeak = 0;
d->waveform.clear();
} else {
float64 coef = 1. / fadeSamples, fadedFrom = 0;
for (short *ptr = ((short*)_captured.data()) + capturedSamples, *end = ptr - fadeSamples; ptr != end; ++fadedFrom) {
--ptr;
*ptr = qRound(fadedFrom * coef * *ptr);
}
if (capturedSamples % d->srcSamples) {
int32 s = _captured.size();
_captured.resize(s + (d->srcSamples - (capturedSamples % d->srcSamples)) * sizeof(short));
memset(_captured.data() + s, 0, _captured.size() - s);
}
int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0;
while (_captured.size() >= encoded + framesize) {
processFrame(encoded, framesize);
encoded += framesize;
}
writeFrame(nullptr); // drain the codec
if (encoded != _captured.size()) {
d->fullSamples = 0;
d->dataPos = 0;
d->data.clear();
d->waveformMod = 0;
d->waveformPeak = 0;
d->waveform.clear();
}
}
}
DEBUG_LOG(("Audio Capture: stopping (need result: %1), size: %2, samples: %3").arg(Logs::b(needResult)).arg(d->data.size()).arg(d->fullSamples));
_captured = QByteArray();
// Finish stream
if (d->device) {
av_write_trailer(d->fmtContext);
}
QByteArray result = d->fullSamples ? d->data : QByteArray();
VoiceWaveform waveform;
qint32 samples = d->fullSamples;
if (samples && !d->waveform.isEmpty()) {
int64 count = d->waveform.size(), sum = 0;
if (count >= WaveformSamplesCount) {
QVector<uint16> peaks;
peaks.reserve(WaveformSamplesCount);
uint16 peak = 0;
for (int32 i = 0; i < count; ++i) {
uint16 sample = uint16(d->waveform.at(i)) * 256;
if (peak < sample) {
peak = sample;
}
sum += WaveformSamplesCount;
if (sum >= count) {
sum -= count;
peaks.push_back(peak);
peak = 0;
}
}
int64 sum = std::accumulate(peaks.cbegin(), peaks.cend(), 0ULL);
peak = qMax(int32(sum * 1.8 / peaks.size()), 2500);
waveform.resize(peaks.size());
for (int32 i = 0, l = peaks.size(); i != l; ++i) {
waveform[i] = char(qMin(31U, uint32(qMin(peaks.at(i), peak)) * 31 / peak));
}
}
}
if (d->device) {
alcCaptureStop(d->device);
alcCaptureCloseDevice(d->device);
d->device = nullptr;
if (d->codecContext) {
avcodec_free_context(&d->codecContext);
d->codecContext = nullptr;
}
if (d->srcSamplesData) {
if (d->srcSamplesData[0]) {
av_freep(&d->srcSamplesData[0]);
}
av_freep(&d->srcSamplesData);
}
if (d->dstSamplesData) {
if (d->dstSamplesData[0]) {
av_freep(&d->dstSamplesData[0]);
}
av_freep(&d->dstSamplesData);
}
d->fullSamples = 0;
if (d->swrContext) {
swr_free(&d->swrContext);
d->swrContext = nullptr;
}
if (d->opened) {
avformat_close_input(&d->fmtContext);
d->opened = false;
}
if (d->ioContext) {
av_freep(&d->ioContext->buffer);
av_freep(&d->ioContext);
d->ioBuffer = nullptr;
} else if (d->ioBuffer) {
av_freep(&d->ioBuffer);
}
if (d->fmtContext) {
avformat_free_context(d->fmtContext);
d->fmtContext = nullptr;
}
d->fmt = nullptr;
d->stream = nullptr;
d->codec = nullptr;
d->lastUpdate = 0;
d->levelMax = 0;
d->dataPos = 0;
d->data.clear();
d->waveformMod = 0;
d->waveformPeak = 0;
d->waveform.clear();
}
if (needResult) emit done(result, waveform, samples);
}
void Instance::Inner::onTimeout() {
if (!d->device) {
_timer.stop();
return;
}
ALint samples;
alcGetIntegerv(d->device, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples);
if (ErrorHappened(d->device)) {
onStop(false);
emit error();
return;
}
if (samples > 0) {
// Get samples from OpenAL
int32 s = _captured.size(), news = s + samples * sizeof(short);
if (news / AudioVoiceMsgBufferSize > s / AudioVoiceMsgBufferSize) {
_captured.reserve(((news / AudioVoiceMsgBufferSize) + 1) * AudioVoiceMsgBufferSize);
}
_captured.resize(news);
alcCaptureSamples(d->device, (ALCvoid *)(_captured.data() + s), samples);
if (ErrorHappened(d->device)) {
onStop(false);
emit error();
return;
}
// Count new recording level and update view
int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000;
int32 levelindex = d->fullSamples + (s / sizeof(short));
for (const short *ptr = (const short*)(_captured.constData() + s), *end = (const short*)(_captured.constData() + news); ptr < end; ++ptr, ++levelindex) {
if (levelindex > skipSamples) {
uint16 value = qAbs(*ptr);
if (levelindex < skipSamples + fadeSamples) {
value = qRound(value * float64(levelindex - skipSamples) / fadeSamples);
}
if (d->levelMax < value) {
d->levelMax = value;
}
}
}
qint32 samplesFull = d->fullSamples + _captured.size() / sizeof(short), samplesSinceUpdate = samplesFull - d->lastUpdate;
if (samplesSinceUpdate > AudioVoiceMsgUpdateView * AudioVoiceMsgFrequency / 1000) {
emit updated(d->levelMax, samplesFull);
d->lastUpdate = samplesFull;
d->levelMax = 0;
}
// Write frames
int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0;
while (uint32(_captured.size()) >= encoded + framesize + fadeSamples * sizeof(short)) {
processFrame(encoded, framesize);
encoded += framesize;
}
// Collapse the buffer
if (encoded > 0) {
int32 goodSize = _captured.size() - encoded;
memmove(_captured.data(), _captured.constData() + encoded, goodSize);
_captured.resize(goodSize);
}
} else {
DEBUG_LOG(("Audio Capture: no samples to capture."));
}
}
void Instance::Inner::processFrame(int32 offset, int32 framesize) {
// Prepare audio frame
if (framesize % sizeof(short)) { // in the middle of a sample
LOG(("Audio Error: Bad framesize in writeFrame() for capture, framesize %1, %2").arg(framesize));
onStop(false);
emit error();
return;
}
int32 samplesCnt = framesize / sizeof(short);
int res = 0;
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
auto srcSamplesDataChannel = (short*)(_captured.data() + offset);
auto srcSamplesData = &srcSamplesDataChannel;
// memcpy(d->srcSamplesData[0], _captured.constData() + offset, framesize);
int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000;
if (d->fullSamples < skipSamples + fadeSamples) {
int32 fadedCnt = qMin(samplesCnt, skipSamples + fadeSamples - d->fullSamples);
float64 coef = 1. / fadeSamples, fadedFrom = d->fullSamples - skipSamples;
short *ptr = srcSamplesDataChannel, *zeroEnd = ptr + qMin(samplesCnt, qMax(0, skipSamples - d->fullSamples)), *end = ptr + fadedCnt;
for (; ptr != zeroEnd; ++ptr, ++fadedFrom) {
*ptr = 0;
}
for (; ptr != end; ++ptr, ++fadedFrom) {
*ptr = qRound(fadedFrom * coef * *ptr);
}
}
d->waveform.reserve(d->waveform.size() + (samplesCnt / d->waveformEach) + 1);
for (short *ptr = srcSamplesDataChannel, *end = ptr + samplesCnt; ptr != end; ++ptr) {
uint16 value = qAbs(*ptr);
if (d->waveformPeak < value) {
d->waveformPeak = value;
}
if (++d->waveformMod == d->waveformEach) {
d->waveformMod -= d->waveformEach;
d->waveform.push_back(uchar(d->waveformPeak / 256));
d->waveformPeak = 0;
}
}
// Convert to final format
d->dstSamples = av_rescale_rnd(swr_get_delay(d->swrContext, d->codecContext->sample_rate) + d->srcSamples, d->codecContext->sample_rate, d->codecContext->sample_rate, AV_ROUND_UP);
if (d->dstSamples > d->maxDstSamples) {
d->maxDstSamples = d->dstSamples;
av_freep(&d->dstSamplesData[0]);
if ((res = av_samples_alloc(d->dstSamplesData, 0, d->codecContext->channels, d->dstSamples, d->codecContext->sample_fmt, 1)) < 0) {
LOG(("Audio Error: Unable to av_samples_alloc for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
}
if ((res = swr_convert(d->swrContext, d->dstSamplesData, d->dstSamples, (const uint8_t **)srcSamplesData, d->srcSamples)) < 0) {
LOG(("Audio Error: Unable to swr_convert for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
// Write audio frame
AVFrame *frame = av_frame_alloc();
frame->nb_samples = d->dstSamples;
frame->pts = av_rescale_q(d->fullSamples, AVRational { 1, d->codecContext->sample_rate }, d->codecContext->time_base);
avcodec_fill_audio_frame(frame, d->codecContext->channels, d->codecContext->sample_fmt, d->dstSamplesData[0], d->dstSamplesSize, 0);
writeFrame(frame);
d->fullSamples += samplesCnt;
av_frame_free(&frame);
}
void Instance::Inner::writeFrame(AVFrame *frame) {
int res = 0;
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
res = avcodec_send_frame(d->codecContext, frame);
if (res == AVERROR(EAGAIN)) {
int packetsWritten = writePackets();
if (packetsWritten < 0) {
if (frame && packetsWritten == AVERROR_EOF) {
LOG(("Audio Error: EOF in packets received when EAGAIN was got in avcodec_send_frame()"));
onStop(false);
emit error();
}
return;
} else if (!packetsWritten) {
LOG(("Audio Error: No packets received when EAGAIN was got in avcodec_send_frame()"));
onStop(false);
emit error();
return;
}
res = avcodec_send_frame(d->codecContext, frame);
}
if (res < 0) {
LOG(("Audio Error: Unable to avcodec_send_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return;
}
if (!frame) { // drain
if ((res = writePackets()) != AVERROR_EOF) {
LOG(("Audio Error: not EOF in packets received when draining the codec, result %1").arg(res));
onStop(false);
emit error();
}
}
}
int Instance::Inner::writePackets() {
AVPacket pkt;
memset(&pkt, 0, sizeof(pkt)); // data and size must be 0;
int res = 0;
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
int written = 0;
do {
av_init_packet(&pkt);
if ((res = avcodec_receive_packet(d->codecContext, &pkt)) < 0) {
if (res == AVERROR(EAGAIN)) {
return written;
} else if (res == AVERROR_EOF) {
return res;
}
LOG(("Audio Error: Unable to avcodec_receive_packet for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return res;
}
av_packet_rescale_ts(&pkt, d->codecContext->time_base, d->stream->time_base);
pkt.stream_index = d->stream->index;
if ((res = av_interleaved_write_frame(d->fmtContext, &pkt)) < 0) {
LOG(("Audio Error: Unable to av_interleaved_write_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
onStop(false);
emit error();
return -1;
}
++written;
av_packet_unref(&pkt);
} while (true);
return written;
}
} // namespace Capture
} // namespace Media
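For orientation, the data flow in this new file is roughly: onTimeout() drains PCM from the OpenAL capture device every 50 ms into _captured; processFrame() applies the initial skip and fade-in, records one waveform peak per AudioVoiceMsgFrequency / 100 samples, and resamples the S16 mono input to the encoder's planar float format with swr_convert(); writeFrame()/writePackets() push the frames through the Opus encoder and mux the packets via the custom AVIOContext into the in-memory QByteArray (Private::data); onStop(true) fades out the tail, writes the trailer and emits done(data, waveform, samples). A short recap of the 5-bit waveform quantization performed in onStop(), copied from the code above:

// One 16-bit peak is kept per chunk of input; the reference peak is clamped to at
// least 2500 and roughly 1.8x the mean, then every entry is mapped into 0..31.
peak = qMax(int32(sum * 1.8 / peaks.size()), 2500);
waveform[i] = char(qMin(31U, uint32(qMin(peaks.at(i), peak)) * 31 / peak));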

View File

@ -0,0 +1,101 @@
/*
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org
Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
*/
#pragma once
struct AVFrame;
namespace Media {
namespace Capture {
void Init();
void DeInit();
class Instance : public QObject {
Q_OBJECT
public:
Instance();
void check();
bool available() const {
return _available;
}
~Instance();
signals:
void start();
void stop(bool needResult);
void done(QByteArray data, VoiceWaveform waveform, qint32 samples);
void updated(quint16 level, qint32 samples);
void error();
private:
class Inner;
friend class Inner;
bool _available = false;
QThread _thread;
Inner *_inner;
};
Instance *instance();
class Instance::Inner : public QObject {
Q_OBJECT
public:
Inner(QThread *thread);
~Inner();
signals:
void error();
void updated(quint16 level, qint32 samples);
void done(QByteArray data, VoiceWaveform waveform, qint32 samples);
public slots:
void onInit();
void onStart();
void onStop(bool needResult);
void onTimeout();
private:
void processFrame(int32 offset, int32 framesize);
void writeFrame(AVFrame *frame);
// Writes the packets till EAGAIN is got from av_receive_packet()
// Returns number of packets written or -1 on error
int writePackets();
struct Private;
Private *d;
QTimer _timer;
QByteArray _captured;
};
} // namespace Capture
} // namespace Media
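The public surface of the module is small: Init()/DeInit() manage the singleton, instance() exposes it, available() reports whether a capture device could be opened, and everything else goes through the Qt signals declared above (start/stop inbound, updated/done/error coming back from the worker thread). A minimal usage sketch under those assumptions; receiver and its slots are hypothetical stand-ins for HistoryWidget.

Media::Capture::Init();                       // creates the Instance and probes the default device
auto capture = Media::Capture::instance();
if (capture->available()) {
	QObject::connect(capture, SIGNAL(updated(quint16,qint32)), receiver, SLOT(onLevel(quint16,qint32)));
	QObject::connect(capture, SIGNAL(done(QByteArray,VoiceWaveform,qint32)), receiver, SLOT(onDone(QByteArray,VoiceWaveform,qint32)));
	QObject::connect(capture, SIGNAL(error()), receiver, SLOT(onError()));
	emit capture->start();     // recording runs on the Instance's internal QThread
	// ... later ...
	emit capture->stop(true);  // true: encode and deliver the result through done()
}
Media::Capture::DeInit();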

View File

@ -25,13 +25,16 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
#include "media/media_audio_ffmpeg_loader.h"
#include "media/media_child_ffmpeg_loader.h"
AudioPlayerLoaders::AudioPlayerLoaders(QThread *thread) : _fromVideoNotify(this, "onVideoSoundAdded") {
namespace Media {
namespace Player {
Loaders::Loaders(QThread *thread) : _fromVideoNotify(this, "onVideoSoundAdded") {
moveToThread(thread);
connect(thread, SIGNAL(started()), this, SLOT(onInit()));
connect(thread, SIGNAL(finished()), this, SLOT(deleteLater()));
}
void AudioPlayerLoaders::feedFromVideo(VideoSoundPart &&part) {
void Loaders::feedFromVideo(VideoSoundPart &&part) {
bool invoke = false;
{
QMutexLocker lock(&_fromVideoMutex);
@ -47,17 +50,17 @@ void AudioPlayerLoaders::feedFromVideo(VideoSoundPart &&part) {
}
}
void AudioPlayerLoaders::startFromVideo(uint64 videoPlayId) {
void Loaders::startFromVideo(uint64 videoPlayId) {
QMutexLocker lock(&_fromVideoMutex);
_fromVideoPlayId = videoPlayId;
clearFromVideoQueue();
}
void AudioPlayerLoaders::stopFromVideo() {
void Loaders::stopFromVideo() {
startFromVideo(0);
}
void AudioPlayerLoaders::onVideoSoundAdded() {
void Loaders::onVideoSoundAdded() {
bool waitingAndAdded = false;
{
QMutexLocker lock(&_fromVideoMutex);
@ -71,12 +74,12 @@ void AudioPlayerLoaders::onVideoSoundAdded() {
}
}
AudioPlayerLoaders::~AudioPlayerLoaders() {
Loaders::~Loaders() {
QMutexLocker lock(&_fromVideoMutex);
clearFromVideoQueue();
}
void AudioPlayerLoaders::clearFromVideoQueue() {
void Loaders::clearFromVideoQueue() {
auto queue = base::take(_fromVideoQueue);
for (auto &packetData : queue) {
AVPacket packet;
@ -85,18 +88,17 @@ void AudioPlayerLoaders::clearFromVideoQueue() {
}
}
void AudioPlayerLoaders::onInit() {
void Loaders::onInit() {
}
void AudioPlayerLoaders::onStart(const AudioMsgId &audio, qint64 position) {
void Loaders::onStart(const AudioMsgId &audio, qint64 position) {
auto type = audio.type();
clear(type);
{
QMutexLocker lock(internal::audioPlayerMutex());
AudioPlayer *voice = audioPlayer();
if (!voice) return;
if (!mixer()) return;
auto data = voice->dataForType(type);
auto data = mixer()->dataForType(type);
if (!data) return;
data->loading = true;
@ -105,7 +107,7 @@ void AudioPlayerLoaders::onStart(const AudioMsgId &audio, qint64 position) {
loadData(audio, position);
}
AudioMsgId AudioPlayerLoaders::clear(AudioMsgId::Type type) {
AudioMsgId Loaders::clear(AudioMsgId::Type type) {
AudioMsgId result;
switch (type) {
case AudioMsgId::Type::Voice: std::swap(result, _audio); _audioLoader = nullptr; break;
@ -115,20 +117,20 @@ AudioMsgId AudioPlayerLoaders::clear(AudioMsgId::Type type) {
return result;
}
void AudioPlayerLoaders::setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state) {
void Loaders::setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state) {
m->playbackState.state = state;
m->playbackState.position = 0;
}
void AudioPlayerLoaders::emitError(AudioMsgId::Type type) {
void Loaders::emitError(AudioMsgId::Type type) {
emit error(clear(type));
}
void AudioPlayerLoaders::onLoad(const AudioMsgId &audio) {
void Loaders::onLoad(const AudioMsgId &audio) {
loadData(audio, 0);
}
void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) {
void Loaders::loadData(AudioMsgId audio, qint64 position) {
SetupError err = SetupNoErrorStarted;
auto type = audio.type();
AudioPlayerLoader *l = setupLoader(audio, err, position);
@ -156,7 +158,7 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) {
if (errAtStart) {
{
QMutexLocker lock(internal::audioPlayerMutex());
AudioPlayer::AudioMsg *m = checkLoader(type);
auto m = checkLoader(type);
if (m) m->playbackState.state = AudioPlayerStoppedAtStart;
}
emitError(type);
@ -185,7 +187,7 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) {
}
QMutexLocker lock(internal::audioPlayerMutex());
AudioPlayer::AudioMsg *m = checkLoader(type);
auto m = checkLoader(type);
if (!m) {
clear(type);
return;
@ -294,8 +296,6 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) {
alGetSourcei(m->source, AL_SOURCE_STATE, &state);
if (internal::audioCheckError()) {
if (state != AL_PLAYING) {
audioPlayer()->resumeDevice();
switch (type) {
case AudioMsgId::Type::Voice: alSourcef(m->source, AL_GAIN, internal::audioSuppressGain()); break;
case AudioMsgId::Type::Song: alSourcef(m->source, AL_GAIN, internal::audioSuppressSongGain() * Global::SongVolume()); break;
@ -323,13 +323,12 @@ void AudioPlayerLoaders::loadData(AudioMsgId audio, qint64 position) {
}
}
AudioPlayerLoader *AudioPlayerLoaders::setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position) {
AudioPlayerLoader *Loaders::setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position) {
err = SetupErrorAtStart;
QMutexLocker lock(internal::audioPlayerMutex());
AudioPlayer *voice = audioPlayer();
if (!voice) return nullptr;
if (!mixer()) return nullptr;
auto data = voice->dataForType(audio.type());
auto data = mixer()->dataForType(audio.type());
if (!data || data->audio != audio || !data->loading) {
emit error(audio);
LOG(("Audio Error: trying to load part of audio, that is not current at the moment"));
@ -395,11 +394,10 @@ AudioPlayerLoader *AudioPlayerLoaders::setupLoader(const AudioMsgId &audio, Setu
return l;
}
AudioPlayer::AudioMsg *AudioPlayerLoaders::checkLoader(AudioMsgId::Type type) {
AudioPlayer *voice = audioPlayer();
if (!voice) return 0;
Mixer::AudioMsg *Loaders::checkLoader(AudioMsgId::Type type) {
if (!mixer()) return nullptr;
auto data = voice->dataForType(type);
auto data = mixer()->dataForType(type);
bool isGoodId = false;
AudioPlayerLoader *l = nullptr;
switch (type) {
@ -417,7 +415,7 @@ AudioPlayer::AudioMsg *AudioPlayerLoaders::checkLoader(AudioMsgId::Type type) {
return data;
}
void AudioPlayerLoaders::onCancel(const AudioMsgId &audio) {
void Loaders::onCancel(const AudioMsgId &audio) {
switch (audio.type()) {
case AudioMsgId::Type::Voice: if (_audio == audio) clear(audio.type()); break;
case AudioMsgId::Type::Song: if (_song == audio) clear(audio.type()); break;
@ -425,13 +423,15 @@ void AudioPlayerLoaders::onCancel(const AudioMsgId &audio) {
}
QMutexLocker lock(internal::audioPlayerMutex());
AudioPlayer *voice = audioPlayer();
if (!voice) return;
if (!mixer()) return;
for (int i = 0; i < AudioSimultaneousLimit; ++i) {
auto data = voice->dataForType(audio.type(), i);
auto data = mixer()->dataForType(audio.type(), i);
if (data->audio == audio) {
data->loading = false;
}
}
}
} // namespace Player
} // namespace Media

View File

@ -26,21 +26,25 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
class AudioPlayerLoader;
class ChildFFMpegLoader;
class AudioPlayerLoaders : public QObject {
namespace Media {
namespace Player {
class Loaders : public QObject {
Q_OBJECT
public:
AudioPlayerLoaders(QThread *thread);
Loaders(QThread *thread);
void startFromVideo(uint64 videoPlayId);
void stopFromVideo();
void feedFromVideo(VideoSoundPart &&part);
~AudioPlayerLoaders();
~Loaders();
signals:
void error(const AudioMsgId &audio);
void needToCheck();
public slots:
public slots:
void onInit();
void onStart(const AudioMsgId &audio, qint64 position);
@ -64,7 +68,7 @@ private:
void emitError(AudioMsgId::Type type);
AudioMsgId clear(AudioMsgId::Type type);
void setStoppedState(AudioPlayer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped);
void setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped);
enum SetupError {
SetupErrorAtStart = 0,
@ -74,6 +78,9 @@ private:
};
void loadData(AudioMsgId audio, qint64 position);
AudioPlayerLoader *setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position);
AudioPlayer::AudioMsg *checkLoader(AudioMsgId::Type type);
Mixer::AudioMsg *checkLoader(AudioMsgId::Type type);
};
} // namespace Player
} // namespace Media

View File

@ -187,7 +187,7 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readFramesTill(Time
}
// sync by audio stream
auto correctMs = (frameMs >= 0) ? audioPlayer()->getVideoCorrectedTime(_playId, frameMs, systemMs) : frameMs;
auto correctMs = (frameMs >= 0) ? Player::mixer()->getVideoCorrectedTime(_playId, frameMs, systemMs) : frameMs;
if (!_frameRead) {
auto readResult = readNextFrame();
if (readResult != ReadResult::Success) {
@ -221,13 +221,13 @@ TimeMs FFMpegReaderImplementation::durationMs() const {
void FFMpegReaderImplementation::pauseAudio() {
if (_audioStreamId >= 0) {
audioPlayer()->pauseFromVideo(_playId);
Player::mixer()->pauseFromVideo(_playId);
}
}
void FFMpegReaderImplementation::resumeAudio() {
if (_audioStreamId >= 0) {
audioPlayer()->resumeFromVideo(_playId);
Player::mixer()->resumeFromVideo(_playId);
}
}
@ -371,7 +371,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) {
_audioStreamId = av_find_best_stream(_fmtContext, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);
if (_mode == Mode::OnlyGifv) {
if (_audioStreamId >= 0) { // should be no audio stream
_audioStreamId = -1; // do not attempt to access audioPlayer()
_audioStreamId = -1; // do not attempt to access mixer()
return false;
}
if (dataSize() > AnimationInMemory) {
@ -380,7 +380,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) {
if (_codecContext->codec_id != AV_CODEC_ID_H264) {
return false;
}
} else if (_mode == Mode::Silent || !audioPlayer() || !_playId) {
} else if (_mode == Mode::Silent || !Player::mixer() || !_playId) {
_audioStreamId = -1;
}
@ -437,7 +437,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) {
if (_audioStreamId >= 0) {
int64 position = (positionMs * soundData->frequency) / 1000LL;
audioPlayer()->initFromVideo(_playId, std_::move(soundData), position);
Player::mixer()->initFromVideo(_playId, std_::move(soundData), position);
}
if (readResult == PacketResult::Ok) {
@ -453,7 +453,7 @@ QString FFMpegReaderImplementation::logData() const {
FFMpegReaderImplementation::~FFMpegReaderImplementation() {
if (_audioStreamId >= 0) {
audioPlayer()->stopFromVideo(_playId);
Player::mixer()->stopFromVideo(_playId);
}
clearPacketQueue();
@ -490,7 +490,7 @@ FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(
VideoSoundPart part;
part.packet = &_packetNull;
part.videoPlayId = _playId;
audioPlayer()->feedFromVideo(std_::move(part));
Player::mixer()->feedFromVideo(std_::move(part));
}
return PacketResult::EndOfFile;
}
@ -516,7 +516,7 @@ void FFMpegReaderImplementation::processPacket(AVPacket *packet) {
VideoSoundPart part;
part.packet = packet;
part.videoPlayId = _playId;
audioPlayer()->feedFromVideo(std_::move(part));
Player::mixer()->feedFromVideo(std_::move(part));
}
} else {
av_packet_unref(packet);

View File

@ -94,9 +94,7 @@ CoverWidget::CoverWidget(QWidget *parent) : TWidget(parent)
handleSeekFinished(value);
});
_playPause->setClickedCallback([this] {
if (exists()) {
instance()->playPauseCancelClicked();
}
instance()->playPauseCancelClicked();
});
updateRepeatTrackIcon();
@ -110,27 +108,24 @@ CoverWidget::CoverWidget(QWidget *parent) : TWidget(parent)
Global::RefSongVolumeChanged().notify();
});
subscribe(Global::RefSongVolumeChanged(), [this] { updateVolumeToggleIcon(); });
if (exists()) {
subscribe(instance()->repeatChangedNotifier(), [this] {
updateRepeatTrackIcon();
});
subscribe(instance()->playlistChangedNotifier(), [this] {
handlePlaylistUpdate();
});
subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) {
handleSongUpdate(e);
});
subscribe(instance()->songChangedNotifier(), [this] {
handleSongChange();
});
subscribe(instance()->repeatChangedNotifier(), [this] {
updateRepeatTrackIcon();
});
subscribe(instance()->playlistChangedNotifier(), [this] {
handlePlaylistUpdate();
});
subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) {
handleSongUpdate(e);
});
subscribe(instance()->songChangedNotifier(), [this] {
handleSongChange();
if (auto player = audioPlayer()) {
AudioMsgId playing;
auto playbackState = player->currentState(&playing, AudioMsgId::Type::Song);
handleSongUpdate(UpdatedEvent(&playing, &playbackState));
_playPause->finishTransform();
}
}
});
handleSongChange();
AudioMsgId playing;
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
handleSongUpdate(UpdatedEvent(&playing, &playbackState));
_playPause->finishTransform();
}
void CoverWidget::setPinCallback(ButtonCallback &&callback) {
@ -148,9 +143,7 @@ void CoverWidget::handleSeekProgress(float64 progress) {
if (_seekPositionMs != positionMs) {
_seekPositionMs = positionMs;
updateTimeLabel();
if (exists()) {
instance()->startSeeking();
}
instance()->startSeeking();
}
}
@ -161,14 +154,12 @@ void CoverWidget::handleSeekFinished(float64 progress) {
_seekPositionMs = -1;
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing && playbackState.duration) {
audioPlayer()->seek(qRound(progress * playbackState.duration));
Media::Player::mixer()->seek(qRound(progress * playbackState.duration));
}
if (exists()) {
instance()->stopSeeking();
}
instance()->stopSeeking();
}
void CoverWidget::resizeEvent(QResizeEvent *e) {
@ -252,7 +243,7 @@ void CoverWidget::handleSongUpdate(const UpdatedEvent &e) {
auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing);
auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
if (exists() && instance()->isSeeking()) {
if (instance()->isSeeking()) {
showPause = true;
}
auto state = [audio = audioId.audio(), showPause] {
@ -350,16 +341,12 @@ void CoverWidget::createPrevNextButtons() {
_previousTrack.create(this, st::mediaPlayerPanelPreviousButton);
_previousTrack->show();
_previousTrack->setClickedCallback([this]() {
if (exists()) {
instance()->previous();
}
instance()->previous();
});
_nextTrack.create(this, st::mediaPlayerPanelNextButton);
_nextTrack->show();
_nextTrack->setClickedCallback([this]() {
if (exists()) {
instance()->next();
}
instance()->next();
});
updatePlayPrevNextPositions();
}

View File

@ -22,6 +22,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
#include "media/player/media_player_instance.h"
#include "media/media_audio.h"
#include "media/media_audio_capture.h"
#include "observer_peer.h"
namespace Media {
@ -33,24 +34,21 @@ Instance *SingleInstance = nullptr;
} // namespace
void start() {
audioInit();
if (audioPlayer()) {
SingleInstance = new Instance();
}
}
InitAudio();
Capture::Init();
bool exists() {
return (audioPlayer() != nullptr);
SingleInstance = new Instance();
}
void finish() {
delete base::take(SingleInstance);
audioFinish();
Capture::DeInit();
DeInitAudio();
}
Instance::Instance() {
subscribe(audioPlayer(), [this](const AudioMsgId &audioId) {
subscribe(Media::Player::Updated(), [this](const AudioMsgId &audioId) {
if (audioId.type() == AudioMsgId::Type::Song) {
handleSongUpdate(audioId);
}
@ -153,17 +151,17 @@ Instance *instance() {
void Instance::play() {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing) {
if (playbackState.state & AudioPlayerStoppedMask) {
audioPlayer()->play(playing);
mixer()->play(playing);
} else {
if (playbackState.state == AudioPlayerPausing || playbackState.state == AudioPlayerPaused || playbackState.state == AudioPlayerPausedAtEnd) {
audioPlayer()->pauseresume(AudioMsgId::Type::Song);
mixer()->pauseresume(AudioMsgId::Type::Song);
}
}
} else if (_current) {
audioPlayer()->play(_current);
mixer()->play(_current);
}
}
@ -171,7 +169,7 @@ void Instance::play(const AudioMsgId &audioId) {
if (!audioId || !audioId.audio()->song()) {
return;
}
audioPlayer()->play(audioId);
mixer()->play(audioId);
setCurrent(audioId);
if (audioId.audio()->loading()) {
documentLoadProgress(audioId.audio());
@ -180,31 +178,31 @@ void Instance::play(const AudioMsgId &audioId) {
void Instance::pause() {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing) {
if (!(playbackState.state & AudioPlayerStoppedMask)) {
if (playbackState.state == AudioPlayerStarting || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerFinishing) {
audioPlayer()->pauseresume(AudioMsgId::Type::Song);
mixer()->pauseresume(AudioMsgId::Type::Song);
}
}
}
}
void Instance::stop() {
audioPlayer()->stop(AudioMsgId::Type::Song);
mixer()->stop(AudioMsgId::Type::Song);
}
void Instance::playPause() {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing) {
if (playbackState.state & AudioPlayerStoppedMask) {
audioPlayer()->play(playing);
mixer()->play(playing);
} else {
audioPlayer()->pauseresume(AudioMsgId::Type::Song);
mixer()->pauseresume(AudioMsgId::Type::Song);
}
} else if (_current) {
audioPlayer()->play(_current);
mixer()->play(_current);
}
}
@ -222,7 +220,7 @@ void Instance::playPauseCancelClicked() {
}
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing);
auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
auto audio = playing.audio();
@ -255,7 +253,7 @@ void Instance::documentLoadProgress(DocumentData *document) {
template <typename CheckCallback>
void Instance::emitUpdate(CheckCallback check) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (!playing || !check(playing)) {
return;
}
@ -265,7 +263,7 @@ void Instance::emitUpdate(CheckCallback check) {
if (_isPlaying && playbackState.state == AudioPlayerStoppedAtEnd) {
if (_repeatEnabled) {
audioPlayer()->play(_current);
mixer()->play(_current);
} else {
next();
}
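
The notification path implied by this file: playback code notifies the global Media::Player::Updated() observable, and Instance subscribes and filters for songs. A minimal sketch of both sides, assembled from names visible elsewhere in this commit (the surrounding plumbing is assumed):

	// producer side, e.g. when a document starts playing
	auto song = AudioMsgId(data, msgId);
	Media::Player::mixer()->play(song);
	Media::Player::Updated().notify(song);

	// consumer side, in the Instance constructor
	subscribe(Media::Player::Updated(), [this](const AudioMsgId &audioId) {
		if (audioId.type() == AudioMsgId::Type::Song) {
			handleSongUpdate(audioId); // voice and video updates are ignored here
		}
	});
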

View File

@ -32,11 +32,6 @@ namespace Player {
void start();
void finish();
// We use this method instead of checking for instance() != nullptr
// because audioPlayer() can be destroyed at any time by an
// error in audio playback, so we check it each time.
bool exists();
class Instance;
Instance *instance();
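
The removed comment explains why every caller used to guard access to the player; with start() now creating the instance unconditionally, the guard is unnecessary. A condensed before/after sketch of a typical call site, mirroring the call-site changes elsewhere in this commit:

	// before: each call site checked for a missing player
	if (Media::Player::exists()) {
		Media::Player::instance()->play();
	}

	// after: start() always creates SingleInstance, so callers use it directly
	Media::Player::instance()->play();
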

View File

@ -31,9 +31,7 @@ namespace Player {
ListWidget::ListWidget(QWidget *parent) : TWidget(parent) {
setMouseTracking(true);
playlistUpdated();
if (exists()) {
subscribe(instance()->playlistChangedNotifier(), [this] { playlistUpdated(); });
}
subscribe(instance()->playlistChangedNotifier(), [this] { playlistUpdated(); });
subscribe(Global::RefItemRemoved(), [this](HistoryItem *item) {
itemRemoved(item);
});
@ -158,17 +156,15 @@ void ListWidget::itemRemoved(HistoryItem *item) {
}
QRect ListWidget::getCurrentTrackGeometry() const {
if (exists()) {
auto top = marginTop();
auto current = instance()->current();
auto fullMsgId = current.contextId();
for_const (auto layout, _list) {
auto layoutHeight = layout->height();
if (layout->getItem()->fullId() == fullMsgId) {
return QRect(0, top, width(), layoutHeight);
}
top += layoutHeight;
auto top = marginTop();
auto current = instance()->current();
auto fullMsgId = current.contextId();
for_const (auto layout, _list) {
auto layoutHeight = layout->height();
if (layout->getItem()->fullId() == fullMsgId) {
return QRect(0, top, width(), layoutHeight);
}
top += layoutHeight;
}
return QRect(0, height(), width(), 0);
}
@ -188,8 +184,7 @@ int ListWidget::marginTop() const {
void ListWidget::playlistUpdated() {
auto newHeight = 0;
const QList<FullMsgId> emptyPlaylist;
auto &playlist = exists() ? instance()->playlist() : emptyPlaylist;
auto &playlist = instance()->playlist();
auto playlistSize = playlist.size();
auto existingSize = _list.size();
if (playlistSize > existingSize) {

View File

@ -107,9 +107,7 @@ Widget::Widget(QWidget *parent) : TWidget(parent)
handleSeekFinished(value);
});
_playPause->setClickedCallback([this] {
if (exists()) {
instance()->playPauseCancelClicked();
}
instance()->playPauseCancelClicked();
});
updateVolumeToggleIcon();
@ -124,27 +122,24 @@ Widget::Widget(QWidget *parent) : TWidget(parent)
instance()->toggleRepeat();
});
if (exists()) {
subscribe(instance()->repeatChangedNotifier(), [this] {
updateRepeatTrackIcon();
});
subscribe(instance()->playlistChangedNotifier(), [this] {
handlePlaylistUpdate();
});
subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) {
handleSongUpdate(e);
});
subscribe(instance()->songChangedNotifier(), [this] {
handleSongChange();
});
subscribe(instance()->repeatChangedNotifier(), [this] {
updateRepeatTrackIcon();
});
subscribe(instance()->playlistChangedNotifier(), [this] {
handlePlaylistUpdate();
});
subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) {
handleSongUpdate(e);
});
subscribe(instance()->songChangedNotifier(), [this] {
handleSongChange();
if (auto player = audioPlayer()) {
AudioMsgId playing;
auto playbackState = player->currentState(&playing, AudioMsgId::Type::Song);
handleSongUpdate(UpdatedEvent(&playing, &playbackState));
_playPause->finishTransform();
}
}
});
handleSongChange();
AudioMsgId playing;
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
handleSongUpdate(UpdatedEvent(&playing, &playbackState));
_playPause->finishTransform();
}
void Widget::updateVolumeToggleIcon() {
@ -201,9 +196,8 @@ void Widget::handleSeekProgress(float64 progress) {
if (_seekPositionMs != positionMs) {
_seekPositionMs = positionMs;
updateTimeLabel();
if (exists()) {
instance()->startSeeking();
}
instance()->startSeeking();
}
}
@ -214,14 +208,12 @@ void Widget::handleSeekFinished(float64 progress) {
_seekPositionMs = -1;
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing && playbackState.duration) {
audioPlayer()->seek(qRound(progress * playbackState.duration));
mixer()->seek(qRound(progress * playbackState.duration));
}
if (exists()) {
instance()->stopSeeking();
}
instance()->stopSeeking();
}
void Widget::resizeEvent(QResizeEvent *e) {
@ -260,9 +252,7 @@ void Widget::updateOverLabelsState(QPoint pos) {
}
void Widget::updateOverLabelsState(bool over) {
if (exists()) {
instance()->playerWidgetOver().notify(over, true);
}
instance()->playerWidgetOver().notify(over, true);
}
void Widget::updatePlayPrevNextPositions() {
@ -326,7 +316,7 @@ void Widget::handleSongUpdate(const UpdatedEvent &e) {
auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing);
auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
if (exists() && instance()->isSeeking()) {
if (instance()->isSeeking()) {
showPause = true;
}
auto state = [audio = audioId.audio(), showPause] {
@ -425,16 +415,12 @@ void Widget::createPrevNextButtons() {
_previousTrack.create(this, st::mediaPlayerPreviousButton);
_previousTrack->show();
_previousTrack->setClickedCallback([this]() {
if (exists()) {
instance()->previous();
}
instance()->previous();
});
_nextTrack.create(this, st::mediaPlayerNextButton);
_nextTrack->show();
_nextTrack->setClickedCallback([this]() {
if (exists()) {
instance()->next();
}
instance()->next();
});
updatePlayPrevNextPositions();
}

View File

@ -228,9 +228,7 @@ void MediaView::stopGif() {
_videoPaused = _videoStopped = _videoIsSilent = false;
_fullScreenVideo = false;
_clipController.destroy();
if (audioPlayer()) {
disconnect(audioPlayer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&)));
}
disconnect(Media::Player::mixer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&)));
}
void MediaView::documentUpdated(DocumentData *doc) {
@ -1487,9 +1485,7 @@ void MediaView::createClipController() {
connect(_clipController, SIGNAL(toFullScreenPressed()), this, SLOT(onVideoToggleFullScreen()));
connect(_clipController, SIGNAL(fromFullScreenPressed()), this, SLOT(onVideoToggleFullScreen()));
if (audioPlayer()) {
connect(audioPlayer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&)));
}
connect(Media::Player::mixer(), SIGNAL(updated(const AudioMsgId&)), this, SLOT(onVideoPlayProgress(const AudioMsgId&)));
}
void MediaView::setClipControllerGeometry() {
@ -1585,8 +1581,7 @@ void MediaView::onVideoPlayProgress(const AudioMsgId &audioId) {
return;
}
t_assert(audioPlayer() != nullptr);
auto state = audioPlayer()->currentVideoState(_gif->playId());
auto state = Media::Player::mixer()->currentVideoState(_gif->playId());
if (state.duration) {
updateVideoPlaybackState(state);
}

View File

@ -649,14 +649,12 @@ bool Voice::updateStatusText() {
statusSize = FileStatusSizeFailed;
} else if (_data->loaded()) {
statusSize = FileStatusSizeLoaded;
if (audioPlayer()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
AudioMsgId playing;
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
} else {
statusSize = FileStatusSizeReady;
@ -935,17 +933,15 @@ bool Document::updateStatusText() {
} else if (_data->loaded()) {
if (_data->song()) {
statusSize = FileStatusSizeLoaded;
if (audioPlayer()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
if (!showPause && (playing == AudioMsgId(_data, _parent->fullId())) && Media::Player::exists() && Media::Player::instance()->isSeeking()) {
showPause = true;
}
AudioMsgId playing;
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
}
if (!showPause && (playing == AudioMsgId(_data, _parent->fullId())) && Media::Player::instance()->isSeeking()) {
showPause = true;
}
} else {
statusSize = FileStatusSizeLoaded;
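
A hedged reading of the statusSize convention used by these updateStatusText() methods, inferred from the expressions above rather than documented in the commit: non-negative values are either the FileStatusSize* sentinels or a byte count, while a playing track is encoded as a negative number carrying the played seconds.

	// position is in samples, frequency in samples per second
	// (AudioVoiceMsgFrequency as the fallback, as in the code above)
	auto frequency = playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency;
	auto playedSeconds = playbackState.position / frequency;
	statusSize = -1 - playedSeconds;     // negative marks "currently playing"
	auto shownSeconds = -statusSize - 1; // assumed decode on the display side
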

View File

@ -2091,9 +2091,9 @@ int32 OverviewWidget::lastScrollTop() const {
}
int32 OverviewWidget::countBestScroll() const {
if (type() == OverviewMusicFiles && audioPlayer()) {
if (type() == OverviewMusicFiles) {
AudioMsgId playing;
audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing) {
int32 top = _inner->itemTop(playing.contextId());
if (top >= 0) {

View File

@ -74,9 +74,6 @@ bool gCompressPastedImage = true;
QString gTimeFormat = qsl("hh:mm");
bool gHasAudioPlayer = true;
bool gHasAudioCapture = true;
RecentEmojiPack gRecentEmojis;
RecentEmojisPreload gRecentEmojisPreload;
EmojiColorVariants gEmojiVariants;

View File

@ -120,9 +120,6 @@ DeclareSetting(DBIScale, ConfigScale);
DeclareSetting(bool, CompressPastedImage);
DeclareSetting(QString, TimeFormat);
DeclareSetting(bool, HasAudioPlayer);
DeclareSetting(bool, HasAudioCapture);
inline void cChangeTimeFormat(const QString &newFormat) {
if (!newFormat.isEmpty()) cSetTimeFormat(newFormat);
}

View File

@ -79,51 +79,33 @@ bool quit_telegram() {
//}
bool media_play() {
if (Media::Player::exists()) {
Media::Player::instance()->play();
return true;
}
return false;
Media::Player::instance()->play();
return true;
}
bool media_pause() {
if (Media::Player::exists()) {
Media::Player::instance()->pause();
return true;
}
return false;
Media::Player::instance()->pause();
return true;
}
bool media_playpause() {
if (Media::Player::exists()) {
Media::Player::instance()->playPause();
return true;
}
return false;
Media::Player::instance()->playPause();
return true;
}
bool media_stop() {
if (Media::Player::exists()) {
Media::Player::instance()->stop();
return true;
}
return false;
Media::Player::instance()->stop();
return true;
}
bool media_previous() {
if (Media::Player::exists()) {
Media::Player::instance()->previous();
return true;
}
return false;
Media::Player::instance()->previous();
return true;
}
bool media_next() {
if (Media::Player::exists()) {
Media::Player::instance()->next();
return true;
}
return false;
Media::Player::instance()->next();
return true;
}
bool search() {

View File

@ -1161,9 +1161,9 @@ void DocumentOpenClickHandler::doOpen(DocumentData *data, HistoryItem *context,
if (!data->date) return;
auto msgId = context ? context->fullId() : FullMsgId();
bool playVoice = data->voice() && audioPlayer();
bool playMusic = data->song() && audioPlayer();
bool playVideo = data->isVideo() && audioPlayer();
bool playVoice = data->voice();
bool playMusic = data->song();
bool playVideo = data->isVideo();
bool playAnimation = data->isAnimation();
auto &location = data->location(true);
if (auto applyTheme = data->isTheme()) {
@ -1176,26 +1176,26 @@ void DocumentOpenClickHandler::doOpen(DocumentData *data, HistoryItem *context,
if (!location.isEmpty() || (!data->data().isEmpty() && (playVoice || playMusic || playVideo || playAnimation))) {
if (playVoice) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice);
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(data, msgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
audioPlayer()->pauseresume(AudioMsgId::Type::Voice);
Media::Player::mixer()->pauseresume(AudioMsgId::Type::Voice);
} else {
AudioMsgId audio(data, msgId);
audioPlayer()->play(audio);
audioPlayer()->notify(audio);
auto audio = AudioMsgId(data, msgId);
Media::Player::mixer()->play(audio);
Media::Player::Updated().notify(audio);
if (App::main()) {
App::main()->mediaMarkRead(data);
}
}
} else if (playMusic) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(data, msgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
audioPlayer()->pauseresume(AudioMsgId::Type::Song);
Media::Player::mixer()->pauseresume(AudioMsgId::Type::Song);
} else {
AudioMsgId song(data, msgId);
audioPlayer()->play(song);
audioPlayer()->notify(song);
auto song = AudioMsgId(data, msgId);
Media::Player::mixer()->play(song);
Media::Player::Updated().notify(song);
}
} else if (playVideo) {
if (!data->data().isEmpty()) {
@ -1470,8 +1470,8 @@ void DocumentData::performActionOnLoad() {
auto already = loc.name();
auto item = _actionOnLoadMsgId.msg ? App::histItemById(_actionOnLoadMsgId) : nullptr;
bool showImage = !isVideo() && (size < App::kImageSizeLimit);
bool playVoice = voice() && audioPlayer() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen);
bool playMusic = song() && audioPlayer() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen);
bool playVoice = voice() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen);
bool playMusic = song() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen);
bool playAnimation = isAnimation() && (_actionOnLoad == ActionOnLoadPlayInline || _actionOnLoad == ActionOnLoadOpen) && showImage && item && item->getMedia();
if (auto applyTheme = isTheme()) {
if (!loc.isEmpty() && loc.accessEnable()) {
@ -1483,24 +1483,24 @@ void DocumentData::performActionOnLoad() {
if (playVoice) {
if (loaded()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Voice);
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
if (playing == AudioMsgId(this, _actionOnLoadMsgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
audioPlayer()->pauseresume(AudioMsgId::Type::Voice);
Media::Player::mixer()->pauseresume(AudioMsgId::Type::Voice);
} else if (playbackState.state & AudioPlayerStoppedMask) {
audioPlayer()->play(AudioMsgId(this, _actionOnLoadMsgId));
Media::Player::mixer()->play(AudioMsgId(this, _actionOnLoadMsgId));
if (App::main()) App::main()->mediaMarkRead(this);
}
}
} else if (playMusic) {
if (loaded()) {
AudioMsgId playing;
auto playbackState = audioPlayer()->currentState(&playing, AudioMsgId::Type::Song);
auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
if (playing == AudioMsgId(this, _actionOnLoadMsgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
audioPlayer()->pauseresume(AudioMsgId::Type::Song);
Media::Player::mixer()->pauseresume(AudioMsgId::Type::Song);
} else if (playbackState.state & AudioPlayerStoppedMask) {
AudioMsgId song(this, _actionOnLoadMsgId);
audioPlayer()->play(song);
audioPlayer()->notify(song);
Media::Player::mixer()->play(song);
Media::Player::Updated().notify(song);
}
}
} else if (playAnimation) {

View File

@ -301,6 +301,8 @@
'<(src_loc)/media/view/media_clip_volume_controller.h',
'<(src_loc)/media/media_audio.cpp',
'<(src_loc)/media/media_audio.h',
'<(src_loc)/media/media_audio_capture.cpp',
'<(src_loc)/media/media_audio_capture.h',
'<(src_loc)/media/media_audio_ffmpeg_loader.cpp',
'<(src_loc)/media/media_audio_ffmpeg_loader.h',
'<(src_loc)/media/media_audio_loader.cpp',