Allow choosing the camera device in Settings.

This commit is contained in:
John Preston 2020-08-18 18:00:33 +04:00
parent e782e065a0
commit aa87d627c9
10 changed files with 163 additions and 54 deletions

View File

@ -370,6 +370,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
"lng_settings_adaptive_wide" = "Adaptive layout for wide screens";
"lng_settings_section_call_settings" = "Calls Settings";
"lng_settings_call_camera" = "Camera";
"lng_settings_call_section_output" = "Speakers and headphones";
"lng_settings_call_section_input" = "Microphone";
"lng_settings_call_input_device" = "Input device";
@ -380,7 +381,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
"lng_settings_call_stop_mic_test" = "Stop test";
"lng_settings_call_section_other" = "Other settings";
"lng_settings_call_open_system_prefs" = "Open system sound preferences";
"lng_settings_call_device_default" = "Default";
"lng_settings_call_device_default" = "Same as the System";
"lng_settings_call_audio_ducking" = "Mute other sounds during calls";
"lng_settings_language" = "Language";

View File

@ -346,22 +346,35 @@ void Call::setMuted(bool mute) {
}
void Call::setupOutgoingVideo() {
static const auto hasDevices = [] {
return !Webrtc::GetVideoInputList().empty();
};
const auto started = _videoOutgoing->state();
if (!hasDevices()) {
_videoOutgoing->setState(Webrtc::VideoState::Inactive);
}
_videoOutgoing->stateValue(
) | rpl::start_with_next([=](Webrtc::VideoState state) {
if (state != Webrtc::VideoState::Inactive
&& Webrtc::GetVideoInputList().empty()) {
if (state != Webrtc::VideoState::Inactive && !hasDevices()) {
_errors.fire({ ErrorType::NoCamera });
_videoOutgoing->setState(Webrtc::VideoState::Inactive);
} else if (_state.current() != State::Established
&& state != started
&& !_videoCapture) {
_errors.fire({ ErrorType::NotStartedCall });
_videoOutgoing->setState(started);
} else if (state != Webrtc::VideoState::Inactive
&& _instance
&& !_instance->supportsVideo()) {
_errors.fire({ ErrorType::NotVideoCall });
_videoOutgoing->setState(Webrtc::VideoState::Inactive);
} else if (state != Webrtc::VideoState::Inactive) {
// Paused not supported right now.
#ifndef DESKTOP_APP_DISABLE_WEBRTC_INTEGRATION
Assert(state == Webrtc::VideoState::Active);
if (!_videoCapture) {
_videoCapture = tgcalls::VideoCaptureInterface::Create();
_videoCapture = tgcalls::VideoCaptureInterface::Create(
Core::App().settings().callVideoInputDeviceId().toStdString());
_videoCapture->setOutput(_videoOutgoing->sink());
}
if (_instance) {
@ -729,8 +742,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
std::move(encryptionKeyValue),
(_type == Type::Outgoing)),
.mediaDevicesConfig = tgcalls::MediaDevicesConfig{
.audioInputId = settings.callInputDeviceID().toStdString(),
.audioOutputId = settings.callOutputDeviceID().toStdString(),
.audioInputId = settings.callInputDeviceId().toStdString(),
.audioOutputId = settings.callOutputDeviceId().toStdString(),
.inputVolume = settings.callInputVolume() / 100.f,
.outputVolume = settings.callOutputVolume() / 100.f,
},
@ -949,16 +962,22 @@ void Call::setState(State state) {
}
}
void Call::setCurrentAudioDevice(bool input, std::string deviceID) {
void Call::setCurrentAudioDevice(bool input, std::string deviceId) {
if (_instance) {
if (input) {
_instance->setAudioInputDevice(deviceID);
_instance->setAudioInputDevice(deviceId);
} else {
_instance->setAudioOutputDevice(deviceID);
_instance->setAudioOutputDevice(deviceId);
}
}
}
void Call::setCurrentVideoDevice(std::string deviceId) {
if (_videoCapture) {
_videoCapture->switchToDevice(deviceId);
}
}
void Call::setAudioVolume(bool input, float level) {
if (_instance) {
if (input) {

View File

@ -40,6 +40,19 @@ struct DhConfig {
bytes::vector p;
};
enum class ErrorType {
NoCamera,
NoMicrophone,
NotStartedCall,
NotVideoCall,
Unknown,
};
struct Error {
ErrorType type = ErrorType::Unknown;
QString details;
};
class Call : public base::has_weak_ptr {
public:
class Delegate {
@ -105,6 +118,10 @@ public:
return _state.value();
}
[[nodiscard]] rpl::producer<Error> errors() const {
return _errors.events();
}
enum class RemoteAudioState {
Muted,
Active,
@ -155,7 +172,8 @@ public:
QString getDebugLog() const;
void setCurrentAudioDevice(bool input, std::string deviceID);
void setCurrentAudioDevice(bool input, std::string deviceId);
void setCurrentVideoDevice(std::string deviceId);
void setAudioVolume(bool input, float level);
void setAudioDuckingEnabled(bool enabled);
@ -213,6 +231,7 @@ private:
rpl::variable<State> _state = State::Starting;
rpl::variable<RemoteAudioState> _remoteAudioState = RemoteAudioState::Active;
rpl::variable<Webrtc::VideoState> _remoteVideoState;
rpl::event_stream<Error> _errors;
FinishType _finishAfterRequestingCall = FinishType::None;
bool _answerAfterDhConfigReceived = false;
rpl::variable<int> _signalBarCount = kSignalBarStarting;

View File

@ -32,8 +32,9 @@ QByteArray Settings::serialize() const {
+ Serialize::stringSize(_downloadPath.current())
+ Serialize::bytearraySize(_downloadPathBookmark)
+ sizeof(qint32) * 12
+ Serialize::stringSize(_callOutputDeviceID)
+ Serialize::stringSize(_callInputDeviceID)
+ Serialize::stringSize(_callOutputDeviceId)
+ Serialize::stringSize(_callInputDeviceId)
+ Serialize::stringSize(_callVideoInputDeviceId)
+ sizeof(qint32) * 3;
for (const auto &[key, value] : _soundOverrides) {
size += Serialize::stringSize(key) + Serialize::stringSize(value);
@ -63,8 +64,8 @@ QByteArray Settings::serialize() const {
<< qint32(_notificationsCount)
<< static_cast<qint32>(_notificationsCorner)
<< qint32(_autoLock)
<< _callOutputDeviceID
<< _callInputDeviceID
<< _callOutputDeviceId
<< _callInputDeviceId
<< qint32(_callOutputVolume)
<< qint32(_callInputVolume)
<< qint32(_callAudioDuckingEnabled ? 1 : 0)
@ -107,7 +108,8 @@ QByteArray Settings::serialize() const {
<< qint32(_thirdSectionExtendedBy)
<< qint32(_notifyFromAll ? 1 : 0)
<< qint32(_nativeWindowFrame.current() ? 1 : 0)
<< qint32(_systemDarkModeEnabled.current() ? 1 : 0);
<< qint32(_systemDarkModeEnabled.current() ? 1 : 0)
<< _callVideoInputDeviceId;
}
return result;
}
@ -137,8 +139,9 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
qint32 notificationsCount = _notificationsCount;
qint32 notificationsCorner = static_cast<qint32>(_notificationsCorner);
qint32 autoLock = _autoLock;
QString callOutputDeviceID = _callOutputDeviceID;
QString callInputDeviceID = _callInputDeviceID;
QString callOutputDeviceId = _callOutputDeviceId;
QString callInputDeviceId = _callInputDeviceId;
QString callVideoInputDeviceId = _callVideoInputDeviceId;
qint32 callOutputVolume = _callOutputVolume;
qint32 callInputVolume = _callInputVolume;
qint32 callAudioDuckingEnabled = _callAudioDuckingEnabled ? 1 : 0;
@ -193,8 +196,8 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
>> notificationsCount
>> notificationsCorner
>> autoLock
>> callOutputDeviceID
>> callInputDeviceID
>> callOutputDeviceId
>> callInputDeviceId
>> callOutputVolume
>> callInputVolume
>> callAudioDuckingEnabled
@ -253,6 +256,9 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
if (!stream.atEnd()) {
stream >> systemDarkModeEnabled;
}
if (!stream.atEnd()) {
stream >> callVideoInputDeviceId;
}
if (stream.status() != QDataStream::Ok) {
LOG(("App Error: "
"Bad data for Core::Settings::constructFromSerialized()"));
@ -290,8 +296,9 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
_countUnreadMessages = (countUnreadMessages == 1);
_notifyAboutPinned = (notifyAboutPinned == 1);
_autoLock = autoLock;
_callOutputDeviceID = callOutputDeviceID;
_callInputDeviceID = callInputDeviceID;
_callOutputDeviceId = callOutputDeviceId;
_callInputDeviceId = callInputDeviceId;
_callVideoInputDeviceId = callVideoInputDeviceId;
_callOutputVolume = callOutputVolume;
_callInputVolume = callInputVolume;
_callAudioDuckingEnabled = (callAudioDuckingEnabled == 1);
@ -446,8 +453,9 @@ void Settings::resetOnLastLogout() {
_notifyAboutPinned = true;
//_autoLock = 3600;
//_callOutputDeviceID = u"default"_q;
//_callInputDeviceID = u"default"_q;
//_callOutputDeviceId = u"default"_q;
//_callInputDeviceId = u"default"_q;
//_callVideoInputDeviceId = u"default"_q;
//_callOutputVolume = 100;
//_callInputVolume = 100;
//_callAudioDuckingEnabled = true;

View File

@ -175,21 +175,29 @@ public:
void setAutoLock(int value) {
_autoLock = value;
}
[[nodiscard]] QString callOutputDeviceID() const {
return _callOutputDeviceID.isEmpty()
[[nodiscard]] QString callOutputDeviceId() const {
return _callOutputDeviceId.isEmpty()
? u"default"_q
: _callOutputDeviceID;
: _callOutputDeviceId;
}
void setCallOutputDeviceID(const QString &value) {
_callOutputDeviceID = value;
void setCallOutputDeviceId(const QString &value) {
_callOutputDeviceId = value;
}
[[nodiscard]] QString callInputDeviceID() const {
return _callInputDeviceID.isEmpty()
[[nodiscard]] QString callInputDeviceId() const {
return _callInputDeviceId.isEmpty()
? u"default"_q
: _callInputDeviceID;
: _callInputDeviceId;
}
void setCallInputDeviceID(const QString &value) {
_callInputDeviceID = value;
void setCallInputDeviceId(const QString &value) {
_callInputDeviceId = value;
}
[[nodiscard]] QString callVideoInputDeviceId() const {
return _callVideoInputDeviceId.isEmpty()
? u"default"_q
: _callVideoInputDeviceId;
}
void setCallVideoInputDeviceId(const QString &value) {
_callVideoInputDeviceId = value;
}
[[nodiscard]] int callOutputVolume() const {
return _callOutputVolume;
@ -493,8 +501,9 @@ private:
bool _countUnreadMessages = true;
rpl::variable<bool> _notifyAboutPinned = true;
int _autoLock = 3600;
QString _callOutputDeviceID = u"default"_q;
QString _callInputDeviceID = u"default"_q;
QString _callOutputDeviceId = u"default"_q;
QString _callInputDeviceId = u"default"_q;
QString _callVideoInputDeviceId = u"default"_q;
int _callOutputVolume = 100;
int _callInputVolume = 100;
bool _callAudioDuckingEnabled = true;

View File

@ -76,34 +76,86 @@ void Calls::setupContent() {
};
const auto &settings = Core::App().settings();
const auto currentOutputName = [&] {
if (settings.callOutputDeviceID() == qsl("default")) {
return tr::lng_settings_call_device_default(tr::now);
}
const auto list = Webrtc::GetAudioOutputList();
const auto i = ranges::find(
list,
settings.callOutputDeviceID(),
settings.callOutputDeviceId(),
getId);
return (i != end(list))
? getName(*i)
: settings.callOutputDeviceID();
: tr::lng_settings_call_device_default(tr::now);
}();
const auto currentInputName = [&] {
if (settings.callInputDeviceID() == qsl("default")) {
return tr::lng_settings_call_device_default(tr::now);
}
const auto list = Webrtc::GetAudioInputList();
const auto i = ranges::find(
list,
settings.callInputDeviceID(),
settings.callInputDeviceId(),
getId);
return (i != end(list))
? getName(*i)
: settings.callInputDeviceID();
: tr::lng_settings_call_device_default(tr::now);
}();
const auto cameras = Webrtc::GetVideoInputList();
if (!cameras.empty()) {
const auto currentCameraName = [&] {
const auto i = ranges::find(
cameras,
settings.callVideoInputDeviceId(),
getId);
return (i != end(cameras))
? getName(*i)
: tr::lng_settings_call_device_default(tr::now);
}();
AddSkip(content);
AddSubsectionTitle(content, tr::lng_settings_call_camera());
AddButtonWithLabel(
content,
tr::lng_settings_call_input_device(),
rpl::single(
currentCameraName
) | rpl::then(
_cameraNameStream.events()
),
st::settingsButton
)->addClickHandler([=] {
const auto &devices = Webrtc::GetVideoInputList();
const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(getName)
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().callVideoInputDeviceId(),
getId);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
: 0;
const auto save = crl::guard(this, [=](int option) {
_cameraNameStream.fire_copy(options[option]);
const auto deviceId = option
? devices[option - 1].id
: "default";
Core::App().settings().setCallVideoInputDeviceId(deviceId);
Core::App().saveSettingsDelayed();
if (const auto call = Core::App().calls().currentCall()) {
call->setCurrentVideoDevice(deviceId.toStdString());
}
});
Ui::show(Box<SingleChoiceBox>(
tr::lng_settings_call_camera(),
options,
currentOption,
save));
});
AddSkip(content);
AddDivider(content);
}
AddSkip(content);
AddSubsectionTitle(content, tr::lng_settings_call_section_output());
AddButtonWithLabel(
@ -123,7 +175,7 @@ void Calls::setupContent() {
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().callOutputDeviceID(),
Core::App().settings().callOutputDeviceId(),
getId);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
@ -133,7 +185,7 @@ void Calls::setupContent() {
const auto deviceId = option
? devices[option - 1].id
: "default";
Core::App().settings().setCallOutputDeviceID(deviceId);
Core::App().settings().setCallOutputDeviceId(deviceId);
Core::App().saveSettingsDelayed();
if (const auto call = Core::App().calls().currentCall()) {
call->setCurrentAudioDevice(false, deviceId.toStdString());
@ -198,7 +250,7 @@ void Calls::setupContent() {
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().callInputDeviceID(),
Core::App().settings().callInputDeviceId(),
getId);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
@ -208,7 +260,7 @@ void Calls::setupContent() {
const auto deviceId = option
? devices[option - 1].id
: "default";
Core::App().settings().setCallInputDeviceID(deviceId);
Core::App().settings().setCallInputDeviceId(deviceId);
Core::App().saveSettingsDelayed();
if (_micTester) {
stopTestingMicrophone();
@ -354,7 +406,7 @@ void Calls::startTestingMicrophone() {
_micTestTextStream.fire(tr::lng_settings_call_stop_mic_test(tr::now));
_levelUpdateTimer.callEach(50);
_micTester = std::make_unique<tgvoip::AudioInputTester>(
Core::App().settings().callInputDeviceID().toStdString());
Core::App().settings().callInputDeviceId().toStdString());
if (_micTester->Failed()) {
stopTestingMicrophone();
Ui::show(Box<InformBox>(tr::lng_call_error_audio_io(tr::now)));

View File

@ -38,6 +38,7 @@ private:
void stopTestingMicrophone();
const not_null<Window::SessionController*> _controller;
rpl::event_stream<QString> _cameraNameStream;
rpl::event_stream<QString> _outputNameStream;
rpl::event_stream<QString> _inputNameStream;
rpl::event_stream<QString> _micTestTextStream;

View File

@ -1093,9 +1093,9 @@ bool ReadSetting(
settingsStream >> duckingEnabled;
if (CheckStreamStatus(settingsStream)) {
auto &app = Core::App().settings();
app.setCallOutputDeviceID(outputDeviceID);
app.setCallOutputDeviceId(outputDeviceID);
app.setCallOutputVolume(outputVolume);
app.setCallInputDeviceID(inputDeviceID);
app.setCallInputDeviceId(inputDeviceID);
app.setCallInputVolume(inputVolume);
app.setCallAudioDuckingEnabled(duckingEnabled);
}

@ -1 +1 @@
Subproject commit c6c78d68729ce16cff2bee868939780788443963
Subproject commit 481f7fa9bbcbc07ebbbdb6432bdeebc1e0ca3982

@ -1 +1 @@
Subproject commit 32957e855993b20c95fa714518ba4bc55ebcdd32
Subproject commit 0b6130704442a1fb8cd91407ec46d3ae5a66389e