Fix multiple issues with device settings
To track media devices, we were previously relying on a combination of LiveKit's useMediaDeviceSelect hook and an object called UserChoices. Device settings should be accessible from outside a call, but useMediaDeviceSelect can only be used with a room or a set of preview tracks, so it couldn't be raised to the app's top level. I also felt that the UserChoices code was hard to follow, because no component clearly owned the object. To bring clarity to media device handling and allow device settings to be shown outside a call, I refactored these things into a single MediaDevicesContext which is instantiated at the top level of the app. Then, I had to manually sync LiveKit's device state with whatever is selected in the context. This refactoring ended up fixing a couple of other bugs with device handling along the way.
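In rough outline, the pattern is: a React context owns the selected device IDs (so settings can be rendered anywhere, in or out of a call), and an effect pushes the selection into the LiveKit room whenever either changes. The following is a minimal, self-contained sketch of that idea, not the code from this commit; the names SimpleDevicesProvider, useSimpleDevices, and useSyncDevicesWithRoom are illustrative only. The real implementation (src/livekit/MediaDevicesContext.tsx and useLiveKit.ts below) additionally handles device enumeration, fallback selection, and device-name permissions.

// Sketch only: context as the source of truth for device selection,
// synced into livekit-client's Room via switchActiveDevice.
import { createContext, useContext, useEffect, useState, FC, ReactNode } from "react";
import { Room } from "livekit-client";

interface DeviceSelection {
  audioInput?: string;
  audioOutput?: string;
  videoInput?: string;
  select: (kind: MediaDeviceKind, deviceId: string) => void;
}

const DevicesContext = createContext<DeviceSelection>({ select: () => {} });

// Hypothetical provider name; the commit's real provider is MediaDevicesProvider.
export const SimpleDevicesProvider: FC<{ children: ReactNode }> = ({ children }) => {
  // The context, not LiveKit, holds the selection, so the same choice is
  // visible in the lobby, in settings, and inside a call.
  const [ids, setIds] = useState<Partial<Record<MediaDeviceKind, string>>>({});
  const select = (kind: MediaDeviceKind, deviceId: string) =>
    setIds((prev) => ({ ...prev, [kind]: deviceId }));
  return (
    <DevicesContext.Provider
      value={{
        audioInput: ids.audioinput,
        audioOutput: ids.audiooutput,
        videoInput: ids.videoinput,
        select,
      }}
    >
      {children}
    </DevicesContext.Provider>
  );
};

export const useSimpleDevices = () => useContext(DevicesContext);

// Inside a call, mirror the context's selection into LiveKit, along the lines
// of the switchActiveDevice sync added to useLiveKit in this commit.
export function useSyncDevicesWithRoom(room: Room | undefined): void {
  const devices = useSimpleDevices();
  useEffect(() => {
    if (room === undefined) return;
    const sync = (kind: MediaDeviceKind, id: string | undefined) => {
      // Only switch when the room disagrees with the context.
      if (id !== undefined && room.getActiveDevice(kind) !== id)
        room.switchActiveDevice(kind, id).catch((e) => console.error(e));
    };
    sync("audioinput", devices.audioInput);
    sync("audiooutput", devices.audioOutput);
    sync("videoinput", devices.videoInput);
  }, [room, devices]);
}

Making the context (rather than LiveKit) the source of truth is the key design choice: device selection and mute state then stay consistent across the lobby, loading screens, and the call itself, with LiveKit treated as a downstream consumer that gets synced.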
src/App.tsx (51 changed lines)
@@ -31,6 +31,7 @@ import { InspectorContextProvider } from "./room/GroupCallInspector";
import { CrashView, LoadingView } from "./FullScreenView";
import { DisconnectedBanner } from "./DisconnectedBanner";
import { Initializer } from "./initializer";
import { MediaDevicesProvider } from "./livekit/MediaDevicesContext";

const SentryRoute = Sentry.withSentryRouting(Route);

@@ -58,30 +59,32 @@ export default function App({ history }: AppProps) {
{loaded ? (
<Suspense fallback={null}>
<ClientProvider>
<InspectorContextProvider>
<Sentry.ErrorBoundary fallback={errorPage}>
<OverlayProvider>
<DisconnectedBanner />
<Switch>
<SentryRoute exact path="/">
<HomePage />
</SentryRoute>
<SentryRoute exact path="/login">
<LoginPage />
</SentryRoute>
<SentryRoute exact path="/register">
<RegisterPage />
</SentryRoute>
<SentryRoute path="/inspector">
<SequenceDiagramViewerPage />
</SentryRoute>
<SentryRoute path="*">
<RoomPage />
</SentryRoute>
</Switch>
</OverlayProvider>
</Sentry.ErrorBoundary>
</InspectorContextProvider>
<MediaDevicesProvider>
<InspectorContextProvider>
<Sentry.ErrorBoundary fallback={errorPage}>
<OverlayProvider>
<DisconnectedBanner />
<Switch>
<SentryRoute exact path="/">
<HomePage />
</SentryRoute>
<SentryRoute exact path="/login">
<LoginPage />
</SentryRoute>
<SentryRoute exact path="/register">
<RegisterPage />
</SentryRoute>
<SentryRoute path="/inspector">
<SequenceDiagramViewerPage />
</SentryRoute>
<SentryRoute path="*">
<RoomPage />
</SentryRoute>
</Switch>
</OverlayProvider>
</Sentry.ErrorBoundary>
</InspectorContextProvider>
</MediaDevicesProvider>
</ClientProvider>
</Suspense>
) : (
src/livekit/MediaDevicesContext.tsx (new file, 207 lines)
/*
Copyright 2023 New Vector Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import {
  FC,
  createContext,
  useCallback,
  useContext,
  useEffect,
  useMemo,
  useRef,
  useState,
} from "react";
import { createMediaDeviceObserver } from "@livekit/components-core";
import { Observable } from "rxjs";

import {
  useAudioInput,
  useAudioOutput,
  useVideoInput,
} from "../settings/useSetting";

export interface MediaDevice {
  available: MediaDeviceInfo[];
  selectedId: string | undefined;
  select: (deviceId: string) => void;
}

export interface MediaDevices {
  audioInput: MediaDevice;
  audioOutput: MediaDevice;
  videoInput: MediaDevice;
  startUsingDeviceNames: () => void;
  stopUsingDeviceNames: () => void;
}

// Cargo-culted from @livekit/components-react
function useObservableState<T>(
  observable: Observable<T> | undefined,
  startWith: T
) {
  const [state, setState] = useState<T>(startWith);
  useEffect(() => {
    // observable state doesn't run in SSR
    if (typeof window === "undefined" || !observable) return;
    const subscription = observable.subscribe(setState);
    return () => subscription.unsubscribe();
  }, [observable]);
  return state;
}

function useMediaDevice(
  kind: MediaDeviceKind,
  fallbackDevice: string | undefined,
  usingNames: boolean
): MediaDevice {
  // Make sure we don't needlessly reset to a device observer without names,
  // once permissions are already given
  const hasRequestedPermissions = useRef(false);
  const requestPermissions = usingNames || hasRequestedPermissions.current;
  hasRequestedPermissions.current ||= usingNames;

  // We use a bare device observer here rather than one of the fancy device
  // selection hooks from @livekit/components-react, because
  // useMediaDeviceSelect expects a room or track, which we don't have here, and
  // useMediaDevices provides no way to request device names.
  // Tragically, the only way to get device names out of LiveKit is to specify a
  // kind, which then results in multiple permissions requests.
  const deviceObserver = useMemo(
    () => createMediaDeviceObserver(kind, requestPermissions),
    [kind, requestPermissions]
  );
  const available = useObservableState(deviceObserver, []);
  const [selectedId, select] = useState(fallbackDevice);

  return useMemo(
    () => ({
      available,
      selectedId: available.some((d) => d.deviceId === selectedId)
        ? selectedId
        : available.some((d) => d.deviceId === fallbackDevice)
        ? fallbackDevice
        : available.at(0)?.deviceId,
      select,
    }),
    [available, selectedId, fallbackDevice, select]
  );
}

const deviceStub: MediaDevice = {
  available: [],
  selectedId: undefined,
  select: () => {},
};
const devicesStub: MediaDevices = {
  audioInput: deviceStub,
  audioOutput: deviceStub,
  videoInput: deviceStub,
  startUsingDeviceNames: () => {},
  stopUsingDeviceNames: () => {},
};

const MediaDevicesContext = createContext<MediaDevices>(devicesStub);

interface Props {
  children: JSX.Element;
}

export const MediaDevicesProvider: FC<Props> = ({ children }) => {
  // Counts the number of callers currently using device names
  const [numCallersUsingNames, setNumCallersUsingNames] = useState(0);
  const usingNames = numCallersUsingNames > 0;

  const [audioInputSetting, setAudioInputSetting] = useAudioInput();
  const [audioOutputSetting, setAudioOutputSetting] = useAudioOutput();
  const [videoInputSetting, setVideoInputSetting] = useVideoInput();

  const audioInput = useMediaDevice(
    "audioinput",
    audioInputSetting,
    usingNames
  );
  const audioOutput = useMediaDevice(
    "audiooutput",
    audioOutputSetting,
    usingNames
  );
  const videoInput = useMediaDevice(
    "videoinput",
    videoInputSetting,
    usingNames
  );

  useEffect(() => {
    if (audioInput.selectedId !== undefined)
      setAudioInputSetting(audioInput.selectedId);
  }, [setAudioInputSetting, audioInput.selectedId]);

  useEffect(() => {
    if (audioOutput.selectedId !== undefined)
      setAudioOutputSetting(audioOutput.selectedId);
  }, [setAudioOutputSetting, audioOutput.selectedId]);

  useEffect(() => {
    if (videoInput.selectedId !== undefined)
      setVideoInputSetting(videoInput.selectedId);
  }, [setVideoInputSetting, videoInput.selectedId]);

  const startUsingDeviceNames = useCallback(
    () => setNumCallersUsingNames((n) => n + 1),
    [setNumCallersUsingNames]
  );
  const stopUsingDeviceNames = useCallback(
    () => setNumCallersUsingNames((n) => n - 1),
    [setNumCallersUsingNames]
  );

  const context: MediaDevices = useMemo(
    () => ({
      audioInput,
      audioOutput,
      videoInput,
      startUsingDeviceNames,
      stopUsingDeviceNames,
    }),
    [
      audioInput,
      audioOutput,
      videoInput,
      startUsingDeviceNames,
      stopUsingDeviceNames,
    ]
  );

  return (
    <MediaDevicesContext.Provider value={context}>
      {children}
    </MediaDevicesContext.Provider>
  );
};

export const useMediaDevices = () => useContext(MediaDevicesContext);

/**
 * React hook that requests for the media devices context to be populated with
 * real device names while this component is mounted. This is not done by
 * default because it may involve requesting additional permissions from the
 * user.
 */
export const useMediaDeviceNames = (context: MediaDevices) =>
  useEffect(() => {
    context.startUsingDeviceNames();
    return context.stopUsingDeviceNames;
  }, [context]);
src/livekit/useLiveKit.ts

@@ -1,3 +1,19 @@
/*
Copyright 2023 New Vector Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import {
E2EEOptions,
ExternalE2EEKeyProvider,
@@ -6,21 +22,18 @@ import {
setLogLevel,
} from "livekit-client";
import { useLiveKitRoom } from "@livekit/components-react";
import { useEffect, useMemo } from "react";
import { useEffect, useMemo, useRef } from "react";
import E2EEWorker from "livekit-client/e2ee-worker?worker";
import { logger } from "matrix-js-sdk/src/logger";

import { defaultLiveKitOptions } from "./options";
import { SFUConfig } from "./openIDSFU";

export type UserChoices = {
audio?: DeviceChoices;
video?: DeviceChoices;
};

export type DeviceChoices = {
selectedId?: string;
enabled: boolean;
};
import { MuteStates } from "../room/MuteStates";
import {
MediaDevice,
MediaDevices,
useMediaDevices,
} from "./MediaDevicesContext";

export type E2EEConfig = {
sharedKey: string;
@@ -29,7 +42,7 @@ export type E2EEConfig = {
setLogLevel("debug");

export function useLiveKit(
userChoices: UserChoices,
muteStates: MuteStates,
sfuConfig?: SFUConfig,
e2eeConfig?: E2EEConfig
): Room | undefined {
@@ -50,21 +63,30 @@ export function useLiveKit(
);
}, [e2eeOptions, e2eeConfig?.sharedKey]);

const roomOptions = useMemo((): RoomOptions => {
const options = defaultLiveKitOptions;
options.videoCaptureDefaults = {
...options.videoCaptureDefaults,
deviceId: userChoices.video?.selectedId,
};
options.audioCaptureDefaults = {
...options.audioCaptureDefaults,
deviceId: userChoices.audio?.selectedId,
};
const initialMuteStates = useRef<MuteStates>(muteStates);
const devices = useMediaDevices();
const initialDevices = useRef<MediaDevices>(devices);

options.e2ee = e2eeOptions;

return options;
}, [userChoices.video, userChoices.audio, e2eeOptions]);
const roomOptions = useMemo(
(): RoomOptions => ({
...defaultLiveKitOptions,
videoCaptureDefaults: {
...defaultLiveKitOptions.videoCaptureDefaults,
deviceId: initialDevices.current.videoInput.selectedId,
},
audioCaptureDefaults: {
...defaultLiveKitOptions.audioCaptureDefaults,
deviceId: initialDevices.current.audioInput.selectedId,
},
// XXX Setting the audio output here doesn't seem to do anything… a bug in
// LiveKit?
audioOutput: {
deviceId: initialDevices.current.audioOutput.selectedId,
},
e2ee: e2eeOptions,
}),
[e2eeOptions]
);

// We have to create the room manually here due to a bug inside
// @livekit/components-react. JSON.stringify() is used in deps of a
@@ -73,10 +95,53 @@ export function useLiveKit(
const { room } = useLiveKitRoom({
token: sfuConfig?.jwt,
serverUrl: sfuConfig?.url,
audio: userChoices.audio?.enabled ?? false,
video: userChoices.video?.enabled ?? false,
audio: initialMuteStates.current.audio.enabled,
video: initialMuteStates.current.video.enabled,
room: roomWithoutProps,
});

useEffect(() => {
// Sync the requested mute states with LiveKit's mute states. We do it this
// way around rather than using LiveKit as the source of truth, so that the
// states can be consistent throughout the lobby and loading screens.
if (room !== undefined) {
const participant = room.localParticipant;
if (participant.isMicrophoneEnabled !== muteStates.audio.enabled) {
participant
.setMicrophoneEnabled(muteStates.audio.enabled)
.catch((e) =>
logger.error("Failed to sync audio mute state with LiveKit", e)
);
}
if (participant.isCameraEnabled !== muteStates.video.enabled) {
participant
.setCameraEnabled(muteStates.video.enabled)
.catch((e) =>
logger.error("Failed to sync video mute state with LiveKit", e)
);
}
}
}, [room, muteStates]);

useEffect(() => {
// Sync the requested devices with LiveKit's devices
if (room !== undefined) {
const syncDevice = (kind: MediaDeviceKind, device: MediaDevice) => {
const id = device.selectedId;
if (id !== undefined && room.getActiveDevice(kind) !== id) {
room
.switchActiveDevice(kind, id)
.catch((e) =>
logger.error(`Failed to sync ${kind} device with LiveKit`, e)
);
}
};

syncDevice("audioinput", devices.audioInput);
syncDevice("audiooutput", devices.audioOutput);
syncDevice("videoinput", devices.videoInput);
}
}, [room, devices]);

return room;
}
src/livekit/useMediaDevicesSwitcher.ts (deleted file, 101 lines)

import { useMediaDeviceSelect } from "@livekit/components-react";
import { LocalAudioTrack, LocalVideoTrack, Room } from "livekit-client";
import { useEffect } from "react";

import { useDefaultDevices } from "../settings/useSetting";

export type MediaDevices = {
  available: MediaDeviceInfo[];
  selectedId: string;
  setSelected: (deviceId: string) => Promise<void>;
};

export type MediaDevicesState = {
  audioIn: MediaDevices;
  audioOut: MediaDevices;
  videoIn: MediaDevices;
};

// if a room is passed this only affects the device selection inside a call. Without room it changes what we see in the lobby
export function useMediaDevicesSwitcher(
  room?: Room,
  tracks?: { videoTrack?: LocalVideoTrack; audioTrack?: LocalAudioTrack },
  requestPermissions = true
): MediaDevicesState {
  const {
    devices: videoDevices,
    activeDeviceId: activeVideoDevice,
    setActiveMediaDevice: setActiveVideoDevice,
  } = useMediaDeviceSelect({
    kind: "videoinput",
    room,
    track: tracks?.videoTrack,
    requestPermissions,
  });

  const {
    devices: audioDevices,
    activeDeviceId: activeAudioDevice,
    setActiveMediaDevice: setActiveAudioDevice,
  } = useMediaDeviceSelect({
    kind: "audioinput",
    room,
    track: tracks?.audioTrack,
    requestPermissions,
  });

  const {
    devices: audioOutputDevices,
    activeDeviceId: activeAudioOutputDevice,
    setActiveMediaDevice: setActiveAudioOutputDevice,
  } = useMediaDeviceSelect({
    kind: "audiooutput",
    room,
  });

  const [settingsDefaultDevices, setSettingsDefaultDevices] =
    useDefaultDevices();

  useEffect(() => {
    setSettingsDefaultDevices({
      audioinput:
        activeAudioDevice != ""
          ? activeAudioDevice
          : settingsDefaultDevices.audioinput,
      videoinput:
        activeVideoDevice != ""
          ? activeVideoDevice
          : settingsDefaultDevices.videoinput,
      audiooutput:
        activeAudioOutputDevice != ""
          ? activeAudioOutputDevice
          : settingsDefaultDevices.audiooutput,
    });
  }, [
    activeAudioDevice,
    activeAudioOutputDevice,
    activeVideoDevice,
    setSettingsDefaultDevices,
    settingsDefaultDevices.audioinput,
    settingsDefaultDevices.audiooutput,
    settingsDefaultDevices.videoinput,
  ]);

  return {
    audioIn: {
      available: audioDevices,
      selectedId: activeAudioDevice,
      setSelected: setActiveAudioDevice,
    },
    audioOut: {
      available: audioOutputDevices,
      selectedId: activeAudioOutputDevice,
      setSelected: setActiveAudioOutputDevice,
    },
    videoIn: {
      available: videoDevices,
      selectedId: activeVideoDevice,
      setSelected: setActiveVideoDevice,
    },
  };
}
src/room/GroupCallView.tsx

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

import { useCallback, useEffect, useMemo, useState } from "react";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useHistory } from "react-router-dom";
import { GroupCall, GroupCallState } from "matrix-js-sdk/src/webrtc/groupCall";
import { MatrixClient } from "matrix-js-sdk/src/client";
@@ -32,17 +32,13 @@ import { CallEndedView } from "./CallEndedView";
import { useSentryGroupCallHandler } from "./useSentryGroupCallHandler";
import { PosthogAnalytics } from "../analytics/PosthogAnalytics";
import { useProfile } from "../profile/useProfile";
import { E2EEConfig, UserChoices } from "../livekit/useLiveKit";
import { E2EEConfig } from "../livekit/useLiveKit";
import { findDeviceByName } from "../media-utils";
import { OpenIDLoader } from "../livekit/OpenIDLoader";
import { ActiveCall } from "./InCallView";
import { Config } from "../config/Config";

/**
* If there already is this many participants in the call, we automatically mute
* the user
*/
const MUTE_PARTICIPANT_COUNT = 8;
import { MuteStates, useMuteStates } from "./MuteStates";
import { useMediaDevices, MediaDevices } from "../livekit/MediaDevicesContext";

declare global {
interface Window {
@@ -97,17 +93,30 @@ export function GroupCallView({
};
}, [displayName, avatarUrl, groupCall]);

const deviceContext = useMediaDevices();
const latestDevices = useRef<MediaDevices>();
latestDevices.current = deviceContext;

const muteStates = useMuteStates(participants.size);
const latestMuteStates = useRef<MuteStates>();
latestMuteStates.current = muteStates;

useEffect(() => {
if (widget && preload) {
// In preload mode, wait for a join action before entering
const onJoin = async (ev: CustomEvent<IWidgetApiRequest>) => {
const devices = await Room.getLocalDevices();
// XXX: I think this is broken currently - LiveKit *won't* request
// permissions and give you device names unless you specify a kind, but
// here we want all kinds of devices. This needs a fix in livekit-client
// for the following name-matching logic to do anything useful.
const devices = await Room.getLocalDevices(undefined, true);

const { audioInput, videoInput } = ev.detail
.data as unknown as JoinCallData;
const newChoices = {} as UserChoices;

if (audioInput !== null) {
if (audioInput === null) {
latestMuteStates.current!.audio.setEnabled?.(false);
} else {
const deviceId = await findDeviceByName(
audioInput,
"audioinput",
@@ -115,15 +124,19 @@ export function GroupCallView({
);
if (!deviceId) {
logger.warn("Unknown audio input: " + audioInput);
latestMuteStates.current!.audio.setEnabled?.(false);
} else {
logger.debug(
`Found audio input ID ${deviceId} for name ${audioInput}`
);
newChoices.audio = { selectedId: deviceId, enabled: true };
latestDevices.current!.audioInput.select(deviceId);
latestMuteStates.current!.audio.setEnabled?.(true);
}
}

if (videoInput !== null) {
if (videoInput === null) {
latestMuteStates.current!.video.setEnabled?.(true);
} else {
const deviceId = await findDeviceByName(
videoInput,
"videoinput",
@@ -131,15 +144,16 @@ export function GroupCallView({
);
if (!deviceId) {
logger.warn("Unknown video input: " + videoInput);
latestMuteStates.current!.video.setEnabled?.(false);
} else {
logger.debug(
`Found video input ID ${deviceId} for name ${videoInput}`
);
newChoices.video = { selectedId: deviceId, enabled: true };
latestDevices.current!.videoInput.select(deviceId);
latestMuteStates.current!.video.setEnabled?.(true);
}
}

setUserChoices(newChoices);
await enter();

PosthogAnalytics.instance.eventCallEnded.cacheStartCall(new Date());
@@ -227,9 +241,6 @@ export function GroupCallView({
}
}, [groupCall, state, leave]);

const [userChoices, setUserChoices] = useState<UserChoices | undefined>(
undefined
);
const [e2eeConfig, setE2EEConfig] = useState<E2EEConfig | undefined>(
undefined
);
@@ -248,7 +259,7 @@ export function GroupCallView({

if (error) {
return <ErrorView error={error} />;
} else if (state === GroupCallState.Entered && userChoices) {
} else if (state === GroupCallState.Entered) {
return (
<OpenIDLoader
client={client}
@@ -262,7 +273,7 @@ export function GroupCallView({
onLeave={onLeave}
unencryptedEventsFromUsers={unencryptedEventsFromUsers}
hideHeader={hideHeader}
userChoices={userChoices}
muteStates={muteStates}
e2eeConfig={e2eeConfig}
otelGroupCallMembership={otelGroupCallMembership}
/>
@@ -307,12 +318,11 @@ export function GroupCallView({

return (
<LobbyView
matrixInfo={matrixInfo}
onEnter={(choices: UserChoices, e2eeConfig?: E2EEConfig) => {
setUserChoices(choices);
muteStates={muteStates}
onEnter={(e2eeConfig?: E2EEConfig) => {
setE2EEConfig(e2eeConfig);
enter();
}}
initWithMutedAudio={participants.size > MUTE_PARTICIPANT_COUNT}
isEmbedded={isEmbedded}
hideHeader={hideHeader}
/>
src/room/InCallView.tsx

@@ -78,14 +78,15 @@ import { SettingsModal } from "../settings/SettingsModal";
import { InviteModal } from "./InviteModal";
import { useRageshakeRequestModal } from "../settings/submit-rageshake";
import { RageshakeRequestModal } from "./RageshakeRequestModal";
import { E2EEConfig, UserChoices, useLiveKit } from "../livekit/useLiveKit";
import { useMediaDevicesSwitcher } from "../livekit/useMediaDevicesSwitcher";
import { E2EEConfig, useLiveKit } from "../livekit/useLiveKit";
import { useFullscreen } from "./useFullscreen";
import { useLayoutStates } from "../video-grid/Layout";
import { useSFUConfig } from "../livekit/OpenIDLoader";
import { E2EELock } from "../E2EELock";
import { useEventEmitterThree } from "../useEvents";
import { useWakeLock } from "../useWakeLock";
import { useMergedRefs } from "../useMergedRefs";
import { MuteStates } from "./MuteStates";

const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});
// There is currently a bug in Safari our our code with cloning and sending MediaStreams
@@ -94,17 +95,12 @@ const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});
const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);

export interface ActiveCallProps extends Omit<InCallViewProps, "livekitRoom"> {
userChoices: UserChoices;
e2eeConfig?: E2EEConfig;
}

export function ActiveCall(props: ActiveCallProps) {
const sfuConfig = useSFUConfig();
const livekitRoom = useLiveKit(
props.userChoices,
sfuConfig,
props.e2eeConfig
);
const livekitRoom = useLiveKit(props.muteStates, sfuConfig, props.e2eeConfig);

if (!livekitRoom) {
return null;
@@ -125,6 +121,7 @@ export interface InCallViewProps {
client: MatrixClient;
groupCall: GroupCall;
livekitRoom: Room;
muteStates: MuteStates;
participants: Map<RoomMember, Map<string, ParticipantInfo>>;
onLeave: (error?: Error) => void;
unencryptedEventsFromUsers: Set<string>;
@@ -136,6 +133,7 @@ export function InCallView({
client,
groupCall,
livekitRoom,
muteStates,
participants,
onLeave,
unencryptedEventsFromUsers,
@@ -150,16 +148,7 @@ export function InCallView({
const [containerRef2, bounds] = useMeasure({ polyfill: ResizeObserver });
const boundsValid = bounds.height > 0;
// Merge the refs so they can attach to the same element
const containerRef = useCallback(
(el: HTMLDivElement) => {
containerRef1.current = el;
containerRef2(el);
},
[containerRef1, containerRef2]
);

// Managed media devices state coupled with an active room.
const roomMediaSwitcher = useMediaDevicesSwitcher(livekitRoom);
const containerRef = useMergedRefs(containerRef1, containerRef2);

const screenSharingTracks = useTracks(
[{ source: Track.Source.ScreenShare, withPlaceholder: false }],
@@ -176,19 +165,18 @@ export function InCallView({

const { hideScreensharing } = useUrlParams();

const {
isMicrophoneEnabled,
isCameraEnabled,
isScreenShareEnabled,
localParticipant,
} = useLocalParticipant({ room: livekitRoom });
const { isScreenShareEnabled, localParticipant } = useLocalParticipant({
room: livekitRoom,
});

const toggleMicrophone = useCallback(async () => {
await localParticipant.setMicrophoneEnabled(!isMicrophoneEnabled);
}, [localParticipant, isMicrophoneEnabled]);
const toggleCamera = useCallback(async () => {
await localParticipant.setCameraEnabled(!isCameraEnabled);
}, [localParticipant, isCameraEnabled]);
const toggleMicrophone = useCallback(
() => muteStates.audio.setEnabled?.((e) => !e),
[muteStates]
);
const toggleCamera = useCallback(
() => muteStates.video.setEnabled?.((e) => !e),
[muteStates]
);

const joinRule = useJoinRule(groupCall.room);

@@ -387,14 +375,16 @@ export function InCallView({
buttons.push(
<MicButton
key="1"
muted={!isMicrophoneEnabled}
muted={!muteStates.audio.enabled}
onPress={toggleMicrophone}
disabled={muteStates.audio.setEnabled === null}
data-testid="incall_mute"
/>,
<VideoButton
key="2"
muted={!isCameraEnabled}
muted={!muteStates.video.enabled}
onPress={toggleCamera}
disabled={muteStates.video.setEnabled === null}
data-testid="incall_videomute"
/>
);
@@ -462,7 +452,6 @@ export function InCallView({
<SettingsModal
client={client}
roomId={groupCall.room.roomId}
mediaDevicesSwitcher={roomMediaSwitcher}
{...settingsModalProps}
/>
)}
src/room/LobbyView.tsx

@@ -14,7 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

import { useRef, useEffect, useState, useCallback, ChangeEvent } from "react";
import {
useRef,
useEffect,
useState,
useCallback,
ChangeEvent,
FC,
} from "react";
import { Trans, useTranslation } from "react-i18next";

import styles from "./LobbyView.module.css";
@@ -25,20 +32,26 @@ import { UserMenuContainer } from "../UserMenuContainer";
import { Body, Link } from "../typography/Typography";
import { useLocationNavigation } from "../useLocationNavigation";
import { MatrixInfo, VideoPreview } from "./VideoPreview";
import { E2EEConfig, UserChoices } from "../livekit/useLiveKit";
import { E2EEConfig } from "../livekit/useLiveKit";
import { InputField } from "../input/Input";
import { useEnableE2EE } from "../settings/useSetting";
import { MuteStates } from "./MuteStates";

interface Props {
matrixInfo: MatrixInfo;

onEnter: (userChoices: UserChoices, e2eeConfig?: E2EEConfig) => void;
muteStates: MuteStates;
onEnter: (e2eeConfig?: E2EEConfig) => void;
isEmbedded: boolean;
hideHeader: boolean;
initWithMutedAudio: boolean;
}

export function LobbyView(props: Props) {
export const LobbyView: FC<Props> = ({
matrixInfo,
muteStates,
onEnter,
isEmbedded,
hideHeader,
}) => {
const { t } = useTranslation();
useLocationNavigation();

@@ -51,9 +64,6 @@ export function LobbyView(props: Props) {
}
}, [joinCallButtonRef]);

const [userChoices, setUserChoices] = useState<UserChoices | undefined>(
undefined
);
const [e2eeSharedKey, setE2EESharedKey] = useState<string | undefined>(
undefined
);
@@ -68,10 +78,10 @@ export function LobbyView(props: Props) {

return (
<div className={styles.room}>
{!props.hideHeader && (
{!hideHeader && (
<Header>
<LeftNav>
<RoomHeaderInfo roomName={props.matrixInfo.roomName} />
<RoomHeaderInfo roomName={matrixInfo.roomName} />
</LeftNav>
<RightNav>
<UserMenuContainer />
@@ -80,11 +90,7 @@ export function LobbyView(props: Props) {
)}
<div className={styles.joinRoom}>
<div className={styles.joinRoomContent}>
<VideoPreview
matrixInfo={props.matrixInfo}
initWithMutedAudio={props.initWithMutedAudio}
onUserChoicesChanged={setUserChoices}
/>
<VideoPreview matrixInfo={matrixInfo} muteStates={muteStates} />
{enableE2EE && (
<InputField
className={styles.passwordField}
@@ -100,8 +106,7 @@ export function LobbyView(props: Props) {
className={styles.copyButton}
size="lg"
onPress={() =>
props.onEnter(
userChoices!,
onEnter(
e2eeSharedKey ? { sharedKey: e2eeSharedKey } : undefined
)
}
@@ -112,9 +117,7 @@ export function LobbyView(props: Props) {
<Body>Or</Body>
<CopyButton
variant="secondaryCopy"
value={getRoomUrl(
props.matrixInfo.roomAlias ?? props.matrixInfo.roomId
)}
value={getRoomUrl(matrixInfo.roomAlias ?? matrixInfo.roomId)}
className={styles.copyButton}
copiedMessage={t("Call link copied")}
data-testid="lobby_inviteLink"
@@ -123,7 +126,7 @@ export function LobbyView(props: Props) {
</CopyButton>
</Trans>
</div>
{!props.isEmbedded && (
{!isEmbedded && (
<Body className={styles.joinRoomFooter}>
<Link color="primary" to="/">
{t("Take me Home")}
@@ -133,4 +136,4 @@ export function LobbyView(props: Props) {
</div>
</div>
);
}
};
src/room/MuteStates.ts (new file, 77 lines)
/*
Copyright 2023 New Vector Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import { Dispatch, SetStateAction, useMemo } from "react";

import { MediaDevice, useMediaDevices } from "../livekit/MediaDevicesContext";
import { useReactiveState } from "../useReactiveState";

/**
 * If there already is this many participants in the call, we automatically mute
 * the user
 */
const MUTE_PARTICIPANT_COUNT = 8;

interface DeviceAvailable {
  enabled: boolean;
  setEnabled: Dispatch<SetStateAction<boolean>>;
}

interface DeviceUnavailable {
  enabled: false;
  setEnabled: null;
}

const deviceUnavailable: DeviceUnavailable = {
  enabled: false,
  setEnabled: null,
};

type MuteState = DeviceAvailable | DeviceUnavailable;

export interface MuteStates {
  audio: MuteState;
  video: MuteState;
}

function useMuteState(
  device: MediaDevice,
  enabledByDefault: () => boolean
): MuteState {
  const [enabled, setEnabled] = useReactiveState<boolean>(
    (prev) => device.available.length > 0 && (prev ?? enabledByDefault()),
    [device]
  );
  return useMemo(
    () =>
      device.available.length === 0
        ? deviceUnavailable
        : { enabled, setEnabled },
    [device, enabled, setEnabled]
  );
}

export function useMuteStates(participantCount: number): MuteStates {
  const devices = useMediaDevices();

  const audio = useMuteState(
    devices.audioInput,
    () => participantCount <= MUTE_PARTICIPANT_COUNT
  );
  const video = useMuteState(devices.videoInput, () => true);

  return useMemo(() => ({ audio, video }), [audio, video]);
}
src/room/VideoPreview.tsx

@@ -14,12 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

import React, { useState, useEffect, useCallback, useRef } from "react";
import { useEffect, useCallback, useMemo, useRef, FC } from "react";
import useMeasure from "react-use-measure";
import { ResizeObserver } from "@juggle/resize-observer";
import { OverlayTriggerState } from "@react-stately/overlays";
import { usePreviewTracks } from "@livekit/components-react";
import { LocalAudioTrack, LocalVideoTrack, Track } from "livekit-client";
import {
CreateLocalTracksOptions,
LocalVideoTrack,
Track,
} from "livekit-client";

import { MicButton, SettingsButton, VideoButton } from "../button";
import { Avatar } from "../Avatar";
@@ -27,9 +31,8 @@ import styles from "./VideoPreview.module.css";
import { useModalTriggerState } from "../Modal";
import { SettingsModal } from "../settings/SettingsModal";
import { useClient } from "../ClientContext";
import { useMediaDevicesSwitcher } from "../livekit/useMediaDevicesSwitcher";
import { UserChoices } from "../livekit/useLiveKit";
import { useDefaultDevices } from "../settings/useSetting";
import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { MuteStates } from "./MuteStates";

export type MatrixInfo = {
displayName: string;
@@ -41,15 +44,10 @@ export type MatrixInfo = {

interface Props {
matrixInfo: MatrixInfo;
initWithMutedAudio: boolean;
onUserChoicesChanged: (choices: UserChoices) => void;
muteStates: MuteStates;
}

export function VideoPreview({
matrixInfo,
initWithMutedAudio,
onUserChoicesChanged,
}: Props) {
export const VideoPreview: FC<Props> = ({ matrixInfo, muteStates }) => {
const { client } = useClient();
const [previewRef, previewBounds] = useMeasure({ polyfill: ResizeObserver });

@@ -68,92 +66,41 @@ export function VideoPreview({
settingsModalState.open();
}, [settingsModalState]);

// Create local media tracks.
const [videoEnabled, setVideoEnabled] = useState<boolean>(true);
const [audioEnabled, setAudioEnabled] = useState<boolean>(
!initWithMutedAudio
);
const devices = useMediaDevices();

const initialAudioOptions = useRef<CreateLocalTracksOptions["audio"]>();
initialAudioOptions.current ??= muteStates.audio.enabled && {
deviceId: devices.audioInput.selectedId,
};

// The settings are updated as soon as the device changes. We wrap the settings value in a ref to store their initial value.
// Not changing the device options prohibits the usePreviewTracks hook to recreate the tracks.
const initialDefaultDevices = useRef(useDefaultDevices()[0]);
const tracks = usePreviewTracks(
{
audio: { deviceId: initialDefaultDevices.current.audioinput },
video: { deviceId: initialDefaultDevices.current.videoinput },
// The only reason we request audio here is to get the audio permission
// request over with at the same time. But changing the audio settings
// shouldn't cause this hook to recreate the track, which is why we
// reference the initial values here.
audio: initialAudioOptions.current,
video: muteStates.video.enabled && {
deviceId: devices.videoInput.selectedId,
},
},
(error) => {
console.error("Error while creating preview Tracks:", error);
}
);
const videoTrack = React.useMemo(
const videoTrack = useMemo(
() =>
tracks?.filter((t) => t.kind === Track.Kind.Video)[0] as LocalVideoTrack,
[tracks]
);
const audioTrack = React.useMemo(
() =>
tracks?.filter((t) => t.kind === Track.Kind.Audio)[0] as LocalAudioTrack,
tracks?.find((t) => t.kind === Track.Kind.Video) as
| LocalVideoTrack
| undefined,
[tracks]
);

// Only let the MediaDeviceSwitcher request permissions if a video track is already available.
// Otherwise we would end up asking for permissions in usePreviewTracks and in useMediaDevicesSwitcher.
const requestPermissions = !!audioTrack && !!videoTrack;
const mediaSwitcher = useMediaDevicesSwitcher(
undefined,
{ videoTrack, audioTrack },
requestPermissions
);
const { videoIn, audioIn } = mediaSwitcher;

const videoEl = React.useRef(null);

useEffect(() => {
// Effect to update the settings
onUserChoicesChanged({
video: {
selectedId: videoIn.selectedId,
enabled: videoEnabled,
},
audio: {
selectedId: audioIn.selectedId,
enabled: audioEnabled,
},
});
}, [
onUserChoicesChanged,
videoIn.selectedId,
videoEnabled,
audioIn.selectedId,
audioEnabled,
videoTrack,
audioTrack,
]);

useEffect(() => {
// Effect to update the initial device selection for the ui elements based on the current preview track.
if (!videoIn.selectedId || videoIn.selectedId == "") {
videoTrack?.getDeviceId().then((videoId) => {
videoIn.setSelected(videoId ?? "default");
});
}
if (!audioIn.selectedId || audioIn.selectedId == "") {
audioTrack?.getDeviceId().then((audioId) => {
// getDeviceId() can return undefined for audio devices. This happens if
// the devices list uses "default" as the device id for the current
// device and the device set on the track also uses the deviceId
// "default". Check `normalizeDeviceId` in `getDeviceId` for more
// details.
audioIn.setSelected(audioId ?? "default");
});
}
}, [videoIn, audioIn, videoTrack, audioTrack]);
const videoEl = useRef<HTMLVideoElement | null>(null);

useEffect(() => {
// Effect to connect the videoTrack with the video element.
if (videoEl.current) {
videoTrack?.unmute();
videoTrack?.attach(videoEl.current);
}
return () => {
@@ -161,20 +108,20 @@ export function VideoPreview({
};
}, [videoTrack]);

useEffect(() => {
// Effect to mute/unmute video track. (This has to be done, so that the hardware camera indicator does not confuse the user)
if (videoTrack && videoEnabled) {
videoTrack?.unmute();
} else if (videoTrack) {
videoTrack?.mute();
}
}, [videoEnabled, videoTrack]);
const onAudioPress = useCallback(
() => muteStates.audio.setEnabled?.((e) => !e),
[muteStates]
);
const onVideoPress = useCallback(
() => muteStates.video.setEnabled?.((e) => !e),
[muteStates]
);

return (
<div className={styles.preview} ref={previewRef}>
<video ref={videoEl} muted playsInline disablePictureInPicture />
<>
{(videoTrack ? !videoEnabled : true) && (
{!muteStates.video.enabled && (
<div className={styles.avatarContainer}>
<Avatar
size={(previewBounds.height - 66) / 2}
@@ -185,25 +132,21 @@ export function VideoPreview({
)}
<div className={styles.previewButtons}>
<MicButton
muted={!audioEnabled}
onPress={() => setAudioEnabled(!audioEnabled)}
disabled={!audioTrack}
muted={!muteStates.audio.enabled}
onPress={onAudioPress}
disabled={muteStates.audio.setEnabled === null}
/>
<VideoButton
muted={!videoEnabled}
onPress={() => setVideoEnabled(!videoEnabled)}
disabled={!videoTrack}
muted={!muteStates.video.enabled}
onPress={onVideoPress}
disabled={muteStates.video.setEnabled === null}
/>
<SettingsButton onPress={openSettings} />
</div>
</>
{settingsModalState.isOpen && client && (
<SettingsModal
client={client}
mediaDevicesSwitcher={mediaSwitcher}
{...settingsModalProps}
/>
<SettingsModal client={client} {...settingsModalProps} />
)}
</div>
);
}
};
src/settings/SettingsModal.tsx

@@ -43,14 +43,14 @@ import { Body, Caption } from "../typography/Typography";
import { AnalyticsNotice } from "../analytics/AnalyticsNotice";
import { ProfileSettingsTab } from "./ProfileSettingsTab";
import { FeedbackSettingsTab } from "./FeedbackSettingsTab";
import {
MediaDevices,
MediaDevicesState,
} from "../livekit/useMediaDevicesSwitcher";
import { useUrlParams } from "../UrlParams";
import {
useMediaDevices,
MediaDevice,
useMediaDeviceNames,
} from "../livekit/MediaDevicesContext";

interface Props {
mediaDevicesSwitcher?: MediaDevicesState;
isOpen: boolean;
client: MatrixClient;
roomId?: string;
@@ -74,7 +74,7 @@ export const SettingsModal = (props: Props) => {
const downloadDebugLog = useDownloadDebugLog();

// Generate a `SelectInput` with a list of devices for a given device kind.
const generateDeviceSelection = (devices: MediaDevices, caption: string) => {
const generateDeviceSelection = (devices: MediaDevice, caption: string) => {
if (devices.available.length == 0) return null;

return (
@@ -85,7 +85,7 @@ export const SettingsModal = (props: Props) => {
? "default"
: devices.selectedId
}
onSelectionChange={(id) => devices.setSelected(id.toString())}
onSelectionChange={(id) => devices.select(id.toString())}
>
{devices.available.map(({ deviceId, label }, index) => (
<Item key={deviceId}>
@@ -118,7 +118,8 @@ export const SettingsModal = (props: Props) => {
</Caption>
);

const devices = props.mediaDevicesSwitcher;
const devices = useMediaDevices();
useMediaDeviceNames(devices);

const audioTab = (
<TabItem
@@ -130,8 +131,8 @@ export const SettingsModal = (props: Props) => {
</>
}
>
{devices && generateDeviceSelection(devices.audioIn, t("Microphone"))}
{devices && generateDeviceSelection(devices.audioOut, t("Speaker"))}
{generateDeviceSelection(devices.audioInput, t("Microphone"))}
{generateDeviceSelection(devices.audioOutput, t("Speaker"))}
</TabItem>
);

@@ -145,7 +146,7 @@ export const SettingsModal = (props: Props) => {
</>
}
>
{devices && generateDeviceSelection(devices.videoIn, t("Camera"))}
{generateDeviceSelection(devices.videoInput, t("Camera"))}
</TabItem>
);

@@ -280,8 +281,7 @@ export const SettingsModal = (props: Props) => {
</TabItem>
);

const tabs: JSX.Element[] = [];
if (devices) tabs.push(audioTab, videoTab);
const tabs = [audioTab, videoTab];
if (!isEmbedded) tabs.push(profileTab);
tabs.push(feedbackTab, moreTab);
if (developerSettingsTab) tabs.push(developerTab);
src/settings/useSetting.ts

@@ -115,9 +115,9 @@ export const useDeveloperSettingsTab = () =>
export const useShowConnectionStats = () =>
useSetting("show-connection-stats", false);

export const useDefaultDevices = () =>
useSetting("defaultDevices", {
audioinput: "",
videoinput: "",
audiooutput: "",
});
export const useAudioInput = () =>
useSetting<string | undefined>("audio-input", undefined);
export const useAudioOutput = () =>
useSetting<string | undefined>("audio-output", undefined);
export const useVideoInput = () =>
useSetting<string | undefined>("video-input", undefined);