Merge pull request #1892 from vector-im/dbkr/focus_switch_keep_screenshare
Keep screenshares when switching focus
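In brief: a live screenshare can't be silently re-acquired after a disconnect, so its underlying MediaStreamTrack is cloned before leaving the old focus and the clone is republished on the new one. A minimal sketch of the idea, assuming a LiveKit `Room`; the helper name and parameters are illustrative, not code from this diff:

```ts
import { Room, Track } from "livekit-client";

// Hypothetical helper: clone the MediaStreamTracks of any live screenshares,
// reconnect to the new focus, then republish the clones.
async function switchFocusKeepingScreenshares(
  room: Room,
  newUrl: string,
  newJwt: string,
): Promise<void> {
  const clones: MediaStreamTrack[] = [];
  for (const pub of room.localParticipant.videoTracks.values()) {
    if (pub.track && pub.source === Track.Source.ScreenShare) {
      const clone = pub.track.mediaStreamTrack.clone();
      clone.enabled = true; // the publication may have been muted
      clones.push(clone);
    }
  }

  await room.disconnect();
  await room.connect(newUrl, newJwt);
  for (const t of clones) {
    await room.localParticipant.publishTrack(t, {
      source: Track.Source.ScreenShare,
    });
  }
}
```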
@@ -17,6 +17,7 @@ limitations under the License.
 import {
   AudioCaptureOptions,
   ConnectionState,
+  LocalTrack,
   Room,
   RoomEvent,
   Track,
@@ -55,8 +56,6 @@ async function doConnect(
   audioEnabled: boolean,
   audioOptions: AudioCaptureOptions,
 ): Promise<void> {
-  await livekitRoom!.connect(sfuConfig!.url, sfuConfig!.jwt);
-
   // Always create an audio track manually.
   // livekit (by default) keeps the mic track open when you mute, but if you start muted,
   // doesn't publish it until you unmute. We want to publish it from the start so we're
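The manual track creation this comment describes happens earlier in `doConnect`, outside this hunk; under that assumption it reduces to something like the sketch below (`precreateMicTrack` is an illustrative name, not from the diff):

```ts
import {
  AudioCaptureOptions,
  createLocalTracks,
  LocalTrack,
} from "livekit-client";

// Create (and optionally mute) the mic track ourselves so it can be
// published as soon as the room is connected, even when starting muted.
async function precreateMicTrack(
  audioOptions: AudioCaptureOptions,
  audioEnabled: boolean,
): Promise<LocalTrack> {
  const tracks = await createLocalTracks({ audio: audioOptions });
  if (!audioEnabled) await tracks[0].mute();
  return tracks[0];
}
```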
@@ -82,6 +81,8 @@ async function doConnect(
   }
   if (!audioEnabled) await audioTracks[0].mute();
+
+  logger.info("Pre-created microphone track");

   // check again having awaited for the track to create
   if (livekitRoom!.localParticipant.getTrack(Track.Source.Microphone)) {
     logger.warn(
@@ -92,8 +93,48 @@ async function doConnect(
     }
     return;
   }
-  logger.info("Publishing pre-created mic track");
-  await livekitRoom?.localParticipant.publishTrack(audioTracks[0]);
+  logger.info("Connecting & publishing");
+  try {
+    await connectAndPublish(livekitRoom, sfuConfig, audioTracks[0], []);
+  } catch (e) {
+    for (const t of audioTracks) {
+      t.stop();
+    }
+  }
+}
+
+/**
+ * Connect to the SFU and publish specific tracks, if provided.
+ * This is very specific to what we need to do: for instance, we don't
+ * currently have a need to prepublish video tracks. We just prepublish
+ * a mic track at the start of a call and copy any screenshare tracks over
+ * when switching focus (because we can't re-acquire them without the user
+ * going through the dialog to choose them again).
+ */
+async function connectAndPublish(
+  livekitRoom: Room,
+  sfuConfig: SFUConfig,
+  micTrack: LocalTrack | undefined,
+  screenshareTracks: MediaStreamTrack[],
+): Promise<void> {
+  await livekitRoom!.connect(sfuConfig!.url, sfuConfig!.jwt);
+
+  if (micTrack) {
+    logger.info(`Publishing precreated mic track`);
+    await livekitRoom.localParticipant.publishTrack(micTrack, {
+      source: Track.Source.Microphone,
+    });
+  }
+
+  logger.info(
+    `Publishing ${screenshareTracks.length} precreated screenshare tracks`,
+  );
+  for (const st of screenshareTracks) {
+    livekitRoom.localParticipant.publishTrack(st, {
+      source: Track.Source.ScreenShare,
+    });
+  }
 }

 export function useECConnectionState(
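The constraint in the doc comment above is a browser one, not a LiveKit one: `navigator.mediaDevices.getDisplayMedia()` always shows the screen picker, while `MediaStreamTrack.clone()` duplicates the live capture without any prompt. A minimal sketch:

```ts
// A clone keeps capturing the same surface with no user interaction,
// whereas re-acquiring via getDisplayMedia() would prompt the user again.
function cloneScreenshare(track: MediaStreamTrack): MediaStreamTrack {
  const copy = track.clone();
  copy.enabled = true; // make sure the copy starts unmuted
  return copy;
}
```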
@@ -129,6 +170,33 @@ export function useECConnectionState(
     };
   }, [livekitRoom, onConnStateChanged]);

+  const doFocusSwitch = useCallback(async (): Promise<void> => {
+    const screenshareTracks: MediaStreamTrack[] = [];
+    for (const t of livekitRoom!.localParticipant.videoTracks.values()) {
+      if (t.track && t.source == Track.Source.ScreenShare) {
+        const newTrack = t.track.mediaStreamTrack.clone();
+        newTrack.enabled = true;
+        screenshareTracks.push(newTrack);
+      }
+    }
+
+    // Flag that we're currently switching focus. This will get reset when the
+    // connection state changes back to connected in onConnStateChanged above.
+    setSwitchingFocus(true);
+    await livekitRoom?.disconnect();
+    setIsInDoConnect(true);
+    try {
+      await connectAndPublish(
+        livekitRoom!,
+        sfuConfig!,
+        undefined,
+        screenshareTracks,
+      );
+    } finally {
+      setIsInDoConnect(false);
+    }
+  }, [livekitRoom, sfuConfig]);
+
   const currentSFUConfig = useRef(Object.assign({}, sfuConfig));

   // If we are transitioning from a valid config to another valid one, we need
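`setSwitchingFocus` and its reset in `onConnStateChanged` are defined elsewhere in this file; a hedged sketch of how such a flag is typically wired (the hook name and return shape here are invented for illustration):

```ts
import { useCallback, useState } from "react";
import { ConnectionState } from "livekit-client";

// Hypothetical reduction of the flag's lifecycle: doFocusSwitch sets it
// before disconnecting, and it is cleared once LiveKit reports Connected
// again, so the UI never sees the transient Disconnected state.
function useSwitchingFocus(): [boolean, () => void, (s: ConnectionState) => void] {
  const [switching, setSwitching] = useState(false);
  const begin = useCallback((): void => setSwitching(true), []);
  const onConnStateChanged = useCallback((state: ConnectionState): void => {
    if (state === ConnectionState.Connected) setSwitching(false);
  }, []);
  return [switching, begin, onConnStateChanged];
}
```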
@@ -143,21 +211,7 @@ export function useECConnectionState(
         `SFU config changed! URL was ${currentSFUConfig.current?.url} now ${sfuConfig?.url}`,
       );

-      (async (): Promise<void> => {
-        setSwitchingFocus(true);
-        await livekitRoom?.disconnect();
-        setIsInDoConnect(true);
-        try {
-          await doConnect(
-            livekitRoom!,
-            sfuConfig!,
-            initialAudioEnabled,
-            initialAudioOptions,
-          );
-        } finally {
-          setIsInDoConnect(false);
-        }
-      })();
+      doFocusSwitch();
     } else if (
       !sfuConfigValid(currentSFUConfig.current) &&
       sfuConfigValid(sfuConfig)
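`sfuConfigValid` and `SFUConfig` aren't shown in this diff; judging by how they're used above, they presumably amount to something like the following (an assumption, not taken from this diff):

```ts
// Assumed shapes: SFUConfig is the pair of connection parameters, and a
// config is usable only when both are present.
interface SFUConfig {
  url: string;
  jwt: string;
}

function sfuConfigValid(sfuConfig?: SFUConfig): boolean {
  return Boolean(sfuConfig?.url) && Boolean(sfuConfig?.jwt);
}
```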
@@ -178,7 +232,13 @@ export function useECConnectionState(
     }

     currentSFUConfig.current = Object.assign({}, sfuConfig);
-  }, [sfuConfig, livekitRoom, initialAudioOptions, initialAudioEnabled]);
+  }, [
+    sfuConfig,
+    livekitRoom,
+    initialAudioOptions,
+    initialAudioEnabled,
+    doFocusSwitch,
+  ]);

   // Because we create audio tracks by hand, there's more to connecting than
   // just what LiveKit does in room.connect, and we should continue to return
@@ -22,8 +22,7 @@ import {
   RoomOptions,
   Track,
 } from "livekit-client";
-import { useLiveKitRoom } from "@livekit/components-react";
-import { useEffect, useMemo, useRef, useState } from "react";
+import { useEffect, useMemo, useRef } from "react";
 import E2EEWorker from "livekit-client/e2ee-worker?worker";
 import { logger } from "matrix-js-sdk/src/logger";
 import { MatrixRTCSession } from "matrix-js-sdk/src/matrixrtc/MatrixRTCSession";
@@ -118,11 +117,6 @@ export function useLiveKit(
     [e2eeOptions],
   );

-  // useECConnectionState creates and publishes an audio track by hand. To keep
-  // this from racing with LiveKit's automatic creation of the audio track, we
-  // block audio from being enabled until the connection is finished.
-  const [blockAudio, setBlockAudio] = useState(true);
-
   // Store if audio/video are currently updating, to prohibit unnecessary calls
   // to setMicrophoneEnabled/setCameraEnabled
   const audioMuteUpdating = useRef(false);
@@ -137,19 +131,11 @@ export function useLiveKit(
   // We have to create the room manually here due to a bug inside
   // @livekit/components-react. JSON.stringify() is used in deps of a
   // useEffect() with an argument that references itself, if E2EE is enabled
-  const roomWithoutProps = useMemo(() => {
+  const room = useMemo(() => {
     const r = new Room(roomOptions);
     r.setE2EEEnabled(e2eeConfig.mode !== E2eeType.NONE);
     return r;
   }, [roomOptions, e2eeConfig]);
-  const { room } = useLiveKitRoom({
-    token: sfuConfig?.jwt,
-    serverUrl: sfuConfig?.url,
-    audio: initialMuteStates.current.audio.enabled && !blockAudio,
-    video: initialMuteStates.current.video.enabled,
-    room: roomWithoutProps,
-    connect: false,
-  });

   const connectionState = useECConnectionState(
     {
@@ -160,11 +146,6 @@ export function useLiveKit(
     sfuConfig,
   );

-  // Unblock audio once the connection is finished
-  useEffect(() => {
-    if (connectionState === ConnectionState.Connected) setBlockAudio(false);
-  }, [connectionState, setBlockAudio]);
-
   useEffect(() => {
     // Sync the requested mute states with LiveKit's mute states. We do it this
     // way around rather than using LiveKit as the source of truth, so that the
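The removal of the blockAudio gate in the two hunks above appears to follow from dropping useLiveKitRoom: with the Room created manually and connected only by useECConnectionState, LiveKit no longer auto-creates an audio track, so there is no race left for blockAudio to guard against.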
@@ -200,8 +200,12 @@ export const InCallView: FC<InCallViewProps> = ({
   );

   const onLeavePress = useCallback(() => {
+    // Disconnect from the room. We don't do this in onLeave because that's
+    // also called on an unintentional disconnect. Plus we don't have the
+    // livekit room in onLeave anyway.
+    livekitRoom.disconnect();
     onLeave();
-  }, [onLeave]);
+  }, [livekitRoom, onLeave]);

   useEffect(() => {
     widget?.api.transport.send(