diff --git a/Telegram/CMakeLists.txt b/Telegram/CMakeLists.txt
index 603fd3f75b..bbe81bc503 100644
--- a/Telegram/CMakeLists.txt
+++ b/Telegram/CMakeLists.txt
@@ -1019,6 +1019,7 @@ PRIVATE
     media/audio/media_audio.h
     media/audio/media_audio_capture.cpp
     media/audio/media_audio_capture.h
+    media/audio/media_audio_capture_common.h
     media/audio/media_audio_ffmpeg_loader.cpp
     media/audio/media_audio_ffmpeg_loader.h
     media/audio/media_audio_loader.cpp
diff --git a/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.cpp b/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.cpp
index db606eded1..95bec71181 100644
--- a/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.cpp
+++ b/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.cpp
@@ -84,16 +84,14 @@ enum class FilterType {
 	const int duration = kPrecision
 		* (float64(samples) / ::Media::Player::kDefaultFrequency);
 	const auto durationString = Ui::FormatDurationText(duration / kPrecision);
-	const auto decimalPart = duration % kPrecision;
-	return QString("%1%2%3")
-		.arg(durationString, QLocale().decimalPoint())
-		.arg(decimalPart);
+	const auto decimalPart = QString::number(duration % kPrecision);
+	return durationString + QLocale().decimalPoint() + decimalPart;
 }
 
 [[nodiscard]] std::unique_ptr<VoiceData> ProcessCaptureResult(
-		const ::Media::Capture::Result &data) {
+		const VoiceWaveform &waveform) {
 	auto voiceData = std::make_unique<VoiceData>();
-	voiceData->waveform = data.waveform;
+	voiceData->waveform = waveform;
 	voiceData->wavemax = voiceData->waveform.empty()
 		? uchar(0)
 		: *ranges::max_element(voiceData->waveform);
@@ -427,12 +425,11 @@ public:
 		not_null<Ui::RpWidget*> parent,
 		const style::RecordBar &st,
 		not_null<Main::Session*> session,
-		::Media::Capture::Result &&data,
+		::Media::Capture::Result *data,
 		const style::font &font);
 
 	void requestPaintProgress(float64 progress);
 	rpl::producer<> stopRequests() const;
-	::Media::Capture::Result *data() const;
 
 	void playPause();
 
@@ -456,7 +453,7 @@ private:
 	const not_null<DocumentData*> _document;
 	const std::unique_ptr<VoiceData> _voiceData;
 	const std::shared_ptr<Data::DocumentMedia> _mediaView;
-	const std::unique_ptr<::Media::Capture::Result> _data;
+	const not_null<::Media::Capture::Result*> _data;
 	const base::unique_qptr<Ui::IconButton> _delete;
 	const style::font &_durationFont;
 	const QString _duration;
@@ -486,15 +483,15 @@ ListenWrap::ListenWrap(
 	not_null<Ui::RpWidget*> parent,
 	const style::RecordBar &st,
 	not_null<Main::Session*> session,
-	::Media::Capture::Result &&data,
+	::Media::Capture::Result *data,
 	const style::font &font)
 : _parent(parent)
 , _st(st)
 , _session(session)
 , _document(DummyDocument(&session->data()))
-, _voiceData(ProcessCaptureResult(data))
+, _voiceData(ProcessCaptureResult(data->waveform))
 , _mediaView(_document->createMediaView())
-, _data(std::make_unique<::Media::Capture::Result>(std::move(data)))
+, _data(data)
 , _delete(base::make_unique_q<Ui::IconButton>(parent, _st.remove))
 , _durationFont(font)
 , _duration(Ui::FormatDurationText(
@@ -817,10 +814,6 @@ rpl::producer<> ListenWrap::stopRequests() const {
 	return _delete->clicks() | rpl::to_empty;
 }
 
-::Media::Capture::Result *ListenWrap::data() const {
-	return _data.get();
-}
-
 rpl::lifetime &ListenWrap::lifetime() {
 	return _lifetime;
 }
@@ -1293,12 +1286,14 @@ void VoiceRecordBar::updateTTLGeometry(
 		const auto from = -_ttlButton->width();
 		const auto right = anim::interpolate(from, finalRight, progress);
 		_ttlButton->moveToRight(right, _ttlButton->y());
+#if 0
 	} else if (type == TTLAnimationType::TopBottom) {
 		const auto ttlFrom = anyTop - _ttlButton->height() * 2;
 		const auto ttlTo = anyTop - _lock->height();
 		_ttlButton->moveToLeft(
 			_ttlButton->x(),
 			anim::interpolate(ttlFrom, ttlTo, 1. - progress));
+#endif
 	} else if (type == TTLAnimationType::RightTopStatic) {
 		_ttlButton->moveToRight(
 			-_ttlButton->width(),
@@ -1408,48 +1403,7 @@ void VoiceRecordBar::init() {
 		_showLockAnimation.start(std::move(callback), from, to, duration);
 	}, lifetime());
 
-	_lock->setClickedCallback([=] {
-		if (!_lock->isStopState()) {
-			return;
-		}
-
-		::Media::Capture::instance()->startedChanges(
-		) | rpl::filter([=](bool capturing) {
-			return !capturing && _listen;
-		}) | rpl::take(1) | rpl::start_with_next([=] {
-			_lockShowing = false;
-
-			const auto to = 1.;
-			const auto &duration = st::historyRecordVoiceShowDuration;
-			auto callback = [=](float64 value) {
-				_listen->requestPaintProgress(value);
-				const auto reverseValue = to - value;
-				_level->requestPaintProgress(reverseValue);
-				update();
-				if (to == value) {
-					_recordingLifetime.destroy();
-				}
-				updateTTLGeometry(TTLAnimationType::TopBottom, 1. - value);
-			};
-			_showListenAnimation.stop();
-			_showListenAnimation.start(std::move(callback), 0., to, duration);
-		}, lifetime());
-
-		stopRecording(StopType::Listen);
-	});
-
-	_lock->locks(
-	) | rpl::start_with_next([=] {
-		if (_hasTTLFilter && _hasTTLFilter()) {
-			if (!_ttlButton) {
-				_ttlButton = std::make_unique<TTLButton>(
-					_outerContainer,
-					_st);
-			}
-			_ttlButton->show();
-		}
-		updateTTLGeometry(TTLAnimationType::RightTopStatic, 0);
-
+	const auto setLevelAsSend = [=] {
 		_level->setType(VoiceRecordButton::Type::Send);
 
 		_level->clicks(
@@ -1464,6 +1418,58 @@ void VoiceRecordBar::init() {
 		) | rpl::start_with_next([=](bool enter) {
 			_inField = enter;
 		}, _recordingLifetime);
+	};
+
+	_lock->setClickedCallback([=] {
+		if (isListenState()) {
+			startRecording();
+			_listen = nullptr;
+			setLevelAsSend();
+
+			return;
+		}
+		if (!_lock->isStopState()) {
+			return;
+		}
+
+		stopRecording(StopType::Listen);
+	});
+
+	_paused.value() | rpl::distinct_until_changed(
+	) | rpl::start_with_next([=](bool paused) {
+		if (!paused) {
+			return;
+		}
+		// _lockShowing = false;
+
+		const auto to = 1.;
+		const auto &duration = st::historyRecordVoiceShowDuration;
+		auto callback = [=](float64 value) {
+			_listen->requestPaintProgress(value);
+			const auto reverseValue = to - value;
+			_level->requestPaintProgress(reverseValue);
+			update();
+			if (to == value) {
+				_recordingLifetime.destroy();
+			}
+		};
+		_showListenAnimation.stop();
+		_showListenAnimation.start(std::move(callback), 0., to, duration);
+	}, lifetime());
+
+	_lock->locks(
+	) | rpl::start_with_next([=] {
+		if (_hasTTLFilter && _hasTTLFilter()) {
+			if (!_ttlButton) {
+				_ttlButton = std::make_unique<TTLButton>(
+					_outerContainer,
+					_st);
+			}
+			_ttlButton->show();
+		}
+		updateTTLGeometry(TTLAnimationType::RightTopStatic, 0);
+
+		setLevelAsSend();
 
 		const auto &duration = st::historyRecordVoiceShowDuration;
 		const auto from = 0.;
@@ -1616,7 +1622,12 @@ void VoiceRecordBar::startRecording() {
 		startRedCircleAnimation();
 
 		_recording = true;
-		instance()->start();
+		if (_paused.current()) {
+			_paused = false;
+			instance()->pause(false, nullptr);
+		} else {
+			instance()->start();
+		}
 		instance()->updated(
 		) | rpl::start_with_next_error([=](const Update &update) {
 			_recordingTipRequired = (update.samples < kMinSamples);
@@ -1685,7 +1696,7 @@ void VoiceRecordBar::stop(bool send) {
 		const auto type = send ? StopType::Send : StopType::Cancel;
 		stopRecording(type, ttlBeforeHide);
 	};
-	_lockShowing = false;
+	// _lockShowing = false;
 	visibilityAnimate(false, std::move(disappearanceCallback));
 }
 
@@ -1695,6 +1706,7 @@ void VoiceRecordBar::finish() {
 	_inField = false;
 	_redCircleProgress = 0.;
 	_recordingSamples = 0;
+	_paused = false;
 
 	_showAnimation.stop();
 	_lockToStopAnimation.stop();
@@ -1704,6 +1716,8 @@ void VoiceRecordBar::finish() {
 	[[maybe_unused]] const auto s = takeTTLState();
 
 	_sendActionUpdates.fire({ Api::SendProgressType::RecordVoice, -1 });
+
+	_data = {};
 }
 
 void VoiceRecordBar::hideFast() {
@@ -1719,42 +1733,52 @@ void VoiceRecordBar::stopRecording(StopType type, bool ttlBeforeHide) {
 		instance()->stop(crl::guard(this, [=](Result &&data) {
 			_cancelRequests.fire({});
 		}));
-		return;
-	}
-	instance()->stop(crl::guard(this, [=](Result &&data) {
-		if (data.bytes.isEmpty()) {
-			// Close everything.
-			stop(false);
-			return;
-		}
+	} else if (type == StopType::Listen) {
+		instance()->pause(true, crl::guard(this, [=](Result &&data) {
+			if (data.bytes.isEmpty()) {
+				// Close everything.
+				stop(false);
+				return;
+			}
+			_paused = true;
+			_data = std::move(data);
 
-		window()->raise();
-		window()->activateWindow();
-		const auto duration = Duration(data.samples);
-		if (type == StopType::Send) {
+			window()->raise();
+			window()->activateWindow();
+			_listen = std::make_unique<ListenWrap>(
+				this,
+				_st,
+				&_show->session(),
+				&_data,
+				_cancelFont);
+			_listenChanges.fire({});
+
+			// _lockShowing = false;
+		}));
+	} else if (type == StopType::Send) {
+		instance()->stop(crl::guard(this, [=](Result &&data) {
+			if (data.bytes.isEmpty()) {
+				// Close everything.
+				stop(false);
+				return;
+			}
+			_data = std::move(data);
+
+			window()->raise();
+			window()->activateWindow();
 			const auto options = Api::SendOptions{
 				.ttlSeconds = (ttlBeforeHide
 					? std::numeric_limits<int>::max()
 					: 0),
 			};
 			_sendVoiceRequests.fire({
-				data.bytes,
-				data.waveform,
-				duration,
+				_data.bytes,
+				_data.waveform,
+				Duration(_data.samples),
 				options,
 			});
-		} else if (type == StopType::Listen) {
-			_listen = std::make_unique<ListenWrap>(
-				this,
-				_st,
-				&_show->session(),
-				std::move(data),
-				_cancelFont);
-			_listenChanges.fire({});
-
-			_lockShowing = false;
-		}
-	}));
+		}));
+	}
 }
 
 void VoiceRecordBar::drawDuration(QPainter &p) {
@@ -1811,14 +1835,13 @@ void VoiceRecordBar::drawMessage(QPainter &p, float64 recordActive) {
 
 void VoiceRecordBar::requestToSendWithOptions(Api::SendOptions options) {
 	if (isListenState()) {
-		const auto data = _listen->data();
 		if (takeTTLState()) {
 			options.ttlSeconds = std::numeric_limits<int>::max();
 		}
 		_sendVoiceRequests.fire({
-			data->bytes,
-			data->waveform,
-			Duration(data->samples),
+			_data.bytes,
+			_data.waveform,
+			Duration(_data.samples),
 			options,
 		});
 	}
@@ -1837,7 +1860,7 @@ rpl::producer<> VoiceRecordBar::cancelRequests() const {
 }
 
 bool VoiceRecordBar::isRecording() const {
-	return _recording.current();
+	return _recording.current() && !_paused.current();
 }
 
 bool VoiceRecordBar::isRecordingLocked() const {
diff --git a/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.h b/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.h
index f1a58465ee..332bf5b1f6 100644
--- a/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.h
+++ b/Telegram/SourceFiles/history/view/controls/history_view_voice_record_bar.h
@@ -10,6 +10,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "api/api_common.h"
 #include "base/timer.h"
 #include "history/view/controls/compose_controls_common.h"
+#include "media/audio/media_audio_capture_common.h"
 #include "ui/effects/animations.h"
 #include "ui/round_rect.h"
 #include "ui/rp_widget.h"
@@ -162,6 +163,9 @@ private:
 	std::unique_ptr<TTLButton> _ttlButton;
 	std::unique_ptr<ListenWrap> _listen;
 
+	::Media::Capture::Result _data;
+	rpl::variable<bool> _paused;
+
 	base::Timer _startTimer;
 
 	rpl::event_stream<SendActionUpdate> _sendActionUpdates;
diff --git a/Telegram/SourceFiles/media/audio/media_audio_capture.cpp b/Telegram/SourceFiles/media/audio/media_audio_capture.cpp
index 2a2960dd78..8d72b59123 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_capture.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio_capture.cpp
@@ -7,6 +7,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 */
 #include "media/audio/media_audio_capture.h"
 
+#include "media/audio/media_audio_capture_common.h"
 #include "media/audio/media_audio_ffmpeg_loader.h"
 #include "ffmpeg/ffmpeg_utility.h"
 #include "base/timer.h"
@@ -37,6 +38,45 @@ bool ErrorHappened(ALCdevice *device) {
 	return false;
 }
 
+[[nodiscard]] VoiceWaveform CollectWaveform(
+		const QVector<uchar> &waveformVector) {
+	if (waveformVector.isEmpty()) {
+		return {};
+	}
+	auto waveform = VoiceWaveform();
+	auto count = int64(waveformVector.size());
+	auto sum = int64(0);
+	if (count >= Player::kWaveformSamplesCount) {
+		auto peaks = QVector<uint16>();
+		peaks.reserve(Player::kWaveformSamplesCount);
+
+		auto peak = uint16(0);
+		for (auto i = int32(0); i < count; ++i) {
+			auto sample = uint16(waveformVector.at(i)) * 256;
+			if (peak < sample) {
+				peak = sample;
+			}
+			sum += Player::kWaveformSamplesCount;
+			if (sum >= count) {
+				sum -= count;
+				peaks.push_back(peak);
+				peak = 0;
+			}
+		}
+
+		auto sum = std::accumulate(peaks.cbegin(), peaks.cend(), 0LL);
+		peak = qMax(int32(sum * 1.8 / peaks.size()), 2500);
+
+		waveform.resize(peaks.size());
+		for (int32 i = 0, l = peaks.size(); i != l; ++i) {
+			waveform[i] = char(qMin(
+				31U,
+				uint32(qMin(peaks.at(i), peak)) * 31 / peak));
+		}
+	}
+	return waveform;
+}
+
 } // namespace
 
 class Instance::Inner final : public QObject {
@@ -46,6 +86,7 @@ public:
 
 	void start(Fn<void(Update)> updated, Fn<void()> error);
 	void stop(Fn<void(Result&&)> callback = nullptr);
+	void pause(bool value, Fn<void(Result&&)> callback);
 
 private:
 	void process();
@@ -67,6 +108,8 @@ private:
 	base::Timer _timer;
 	QByteArray _captured;
 
+	bool _paused = false;
+
 };
 
 void Start() {
@@ -118,6 +161,17 @@ void Instance::stop(Fn<void(Result&&)> callback) {
 	});
 }
 
+void Instance::pause(bool value, Fn<void(Result&&)> callback) {
+	Expects(callback != nullptr || !value);
+	InvokeQueued(_inner.get(), [=] {
+		_inner->pause(value, [=](Result &&result) {
+			crl::on_main([=, result = std::move(result)]() mutable {
+				callback(std::move(result));
+			});
+		});
+	});
+}
+
 void Instance::check() {
 	_available = false;
 	if (auto device = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {
@@ -241,6 +295,9 @@ void Instance::Inner::fail() {
 void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
 	_updated = std::move(updated);
 	_error = std::move(error);
+	if (_paused) {
+		_paused = false;
+	}
 
 	// Start OpenAL Capture
 	d->device = alcCaptureOpenDevice(nullptr, kCaptureFrequency, AL_FORMAT_MONO16, kCaptureFrequency / 5);
@@ -404,10 +461,23 @@ void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
 	DEBUG_LOG(("Audio Capture: started!"));
 }
 
+void Instance::Inner::pause(bool value, Fn<void(Result&&)> callback) {
+	_paused = value;
+	if (!_paused) {
+		return;
+	}
+	callback({
+		d->fullSamples ? d->data : QByteArray(),
+		d->fullSamples ? CollectWaveform(d->waveform) : VoiceWaveform(),
+		qint32(d->fullSamples),
+	});
+}
+
 void Instance::Inner::stop(Fn<void(Result&&)> callback) {
 	if (!_timer.isActive()) {
 		return; // in stop() already
 	}
+	_paused = false;
 	_timer.cancel();
 
 	const auto needResult = (callback != nullptr);
@@ -480,33 +550,7 @@ void Instance::Inner::stop(Fn<void(Result&&)> callback) {
 	VoiceWaveform waveform;
 	qint32 samples = d->fullSamples;
 	if (needResult && samples && !d->waveform.isEmpty()) {
-		int64 count = d->waveform.size(), sum = 0;
-		if (count >= Player::kWaveformSamplesCount) {
-			QVector<uint16> peaks;
-			peaks.reserve(Player::kWaveformSamplesCount);
-
-			uint16 peak = 0;
-			for (int32 i = 0; i < count; ++i) {
-				uint16 sample = uint16(d->waveform.at(i)) * 256;
-				if (peak < sample) {
-					peak = sample;
-				}
-				sum += Player::kWaveformSamplesCount;
-				if (sum >= count) {
-					sum -= count;
-					peaks.push_back(peak);
-					peak = 0;
-				}
-			}
-
-			auto sum = std::accumulate(peaks.cbegin(), peaks.cend(), 0LL);
-			peak = qMax(int32(sum * 1.8 / peaks.size()), 2500);
-
-			waveform.resize(peaks.size());
-			for (int32 i = 0, l = peaks.size(); i != l; ++i) {
-				waveform[i] = char(qMin(31U, uint32(qMin(peaks.at(i), peak)) * 31 / peak));
-			}
-		}
+		waveform = CollectWaveform(d->waveform);
 	}
 	if (hadDevice) {
 		if (d->codecContext) {
@@ -568,6 +612,10 @@ void Instance::Inner::stop(Fn<void(Result&&)> callback) {
 void Instance::Inner::process() {
 	Expects(!d->processing);
 
+	if (_paused) {
+		return;
+	}
+
 	d->processing = true;
 	const auto guard = gsl::finally([&] { d->processing = false; });
 
diff --git a/Telegram/SourceFiles/media/audio/media_audio_capture.h b/Telegram/SourceFiles/media/audio/media_audio_capture.h
index 4bb23c2b08..2b46bd7fac 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_capture.h
+++ b/Telegram/SourceFiles/media/audio/media_audio_capture.h
@@ -19,11 +19,7 @@ struct Update {
 	ushort level = 0;
 };
 
-struct Result {
-	QByteArray bytes;
-	VoiceWaveform waveform;
-	int samples = 0;
-};
+struct Result;
 
 void Start();
 void Finish();
@@ -51,6 +47,7 @@ public:
 
 	void start();
 	void stop(Fn<void(Result&&)> callback = nullptr);
+	void pause(bool value, Fn<void(Result&&)> callback);
 
 private:
 	class Inner;
diff --git a/Telegram/SourceFiles/media/audio/media_audio_capture_common.h b/Telegram/SourceFiles/media/audio/media_audio_capture_common.h
new file mode 100644
index 0000000000..370259f583
--- /dev/null
+++ b/Telegram/SourceFiles/media/audio/media_audio_capture_common.h
@@ -0,0 +1,18 @@
+/*
+This file is part of Telegram Desktop,
+the official desktop application for the Telegram messaging service.
+
+For license and copyright information please follow this link:
+https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
+*/
+#pragma once
+
+namespace Media::Capture {
+
+struct Result {
+	QByteArray bytes;
+	VoiceWaveform waveform;
+	int samples = 0;
+};
+
+} // namespace Media::Capture
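Note on the refactored waveform code: the media_audio_capture.cpp hunks above only move the existing peak-downsampling loop out of Instance::Inner::stop() into the new CollectWaveform() helper so that the pause path can build a Result too. For reference, below is a small standalone sketch of that same logic using standard containers instead of Qt types. It is not part of the patch: CollectWaveformSketch and kWaveformSamplesCount are illustrative names, the latter standing in for Player::kWaveformSamplesCount (assumed here to be 100).

// Standalone sketch (not from the patch) of the CollectWaveform() logic,
// assuming kWaveformSamplesCount mirrors Player::kWaveformSamplesCount.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

constexpr std::int64_t kWaveformSamplesCount = 100; // assumed value

std::vector<char> CollectWaveformSketch(const std::vector<unsigned char> &raw) {
	std::vector<char> waveform;
	const auto count = std::int64_t(raw.size());
	if (count < kWaveformSamplesCount) {
		return waveform; // too few samples: empty result, as in the patch
	}
	// Fold `count` raw values into at most kWaveformSamplesCount peaks.
	std::vector<std::uint16_t> peaks;
	peaks.reserve(kWaveformSamplesCount);
	std::uint16_t peak = 0;
	std::int64_t sum = 0;
	for (std::int64_t i = 0; i != count; ++i) {
		const auto sample = std::uint16_t(raw[std::size_t(i)] * 256);
		peak = std::max(peak, sample);
		sum += kWaveformSamplesCount;
		if (sum >= count) {
			sum -= count;
			peaks.push_back(peak);
			peak = 0;
		}
	}
	// Clamp against 1.8x the average peak (floored at 2500) and squeeze
	// each value into the 5-bit range 0..31, as the original loop does.
	const auto total = std::accumulate(peaks.cbegin(), peaks.cend(), std::int64_t(0));
	const auto limit = std::max<std::int64_t>(std::int64_t(total * 1.8 / peaks.size()), 2500);
	waveform.reserve(peaks.size());
	for (const auto value : peaks) {
		const auto clamped = std::min<std::int64_t>(value, limit);
		waveform.push_back(char(std::min<std::int64_t>(31, clamped * 31 / limit)));
	}
	return waveform;
}

int main() {
	std::vector<unsigned char> raw(1000);
	for (std::size_t i = 0; i != raw.size(); ++i) {
		raw[i] = static_cast<unsigned char>((i * 7) % 256);
	}
	const auto waveform = CollectWaveformSketch(raw);
	std::cout << waveform.size() << " waveform values, first = "
		<< int(waveform.front()) << '\n';
}

Each run of roughly count / kWaveformSamplesCount captured bytes is reduced to its peak, and the peaks are then normalized and stored as 5-bit values, which is the compact form VoiceWaveform keeps for drawing the voice-message bars.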