diff --git a/src/livekit/useECConnectionState.ts b/src/livekit/useECConnectionState.ts
index 4e7b4f5a..533e2a14 100644
--- a/src/livekit/useECConnectionState.ts
+++ b/src/livekit/useECConnectionState.ts
@@ -17,6 +17,7 @@ limitations under the License.
 import {
   AudioCaptureOptions,
   ConnectionState,
+  LocalTrack,
   Room,
   RoomEvent,
   Track,
@@ -55,8 +56,6 @@ async function doConnect(
   audioEnabled: boolean,
   audioOptions: AudioCaptureOptions,
 ): Promise<void> {
-  await livekitRoom!.connect(sfuConfig!.url, sfuConfig!.jwt);
-
   // Always create an audio track manually.
   // livekit (by default) keeps the mic track open when you mute, but if you start muted,
   // doesn't publish it until you unmute. We want to publish it from the start so we're
@@ -82,6 +81,8 @@
   }
   if (!audioEnabled) await audioTracks[0].mute();
 
+  logger.info("Pre-created microphone track");
+
   // check again having awaited for the track to create
   if (livekitRoom!.localParticipant.getTrack(Track.Source.Microphone)) {
     logger.warn(
@@ -89,8 +90,48 @@
     );
     return;
   }
-  logger.info("Publishing pre-created mic track");
-  await livekitRoom?.localParticipant.publishTrack(audioTracks[0]);
+
+  logger.info("Connecting & publishing");
+  try {
+    await connectAndPublish(livekitRoom, sfuConfig, audioTracks[0], []);
+  } catch (e) {
+    for (const t of audioTracks) {
+      t.stop();
+    }
+  }
+}
+
+/**
+ * Connect to the SFU and publish specific tracks, if provided.
+ * This is very specific to what we need to do: for instance, we don't
+ * currently have a need to prepublish video tracks. We just prepublish
+ * a mic track at the start of a call and copy any screenshare tracks over
+ * when switching focus (because we can't re-acquire them without the user
+ * going through the dialog to choose them again).
+ */
+async function connectAndPublish(
+  livekitRoom: Room,
+  sfuConfig: SFUConfig,
+  micTrack: LocalTrack | undefined,
+  screenshareTracks: MediaStreamTrack[],
+): Promise<void> {
+  await livekitRoom!.connect(sfuConfig!.url, sfuConfig!.jwt);
+
+  if (micTrack) {
+    logger.info(`Publishing precreated mic track`);
+    await livekitRoom.localParticipant.publishTrack(micTrack, {
+      source: Track.Source.Microphone,
+    });
+  }
+
+  logger.info(
+    `Publishing ${screenshareTracks.length} precreated screenshare tracks`,
+  );
+  for (const st of screenshareTracks) {
+    livekitRoom.localParticipant.publishTrack(st, {
+      source: Track.Source.ScreenShare,
+    });
+  }
 }
 
 export function useECConnectionState(
@@ -126,6 +167,31 @@ export function useECConnectionState(
     };
   }, [livekitRoom, onConnStateChanged]);
 
+  const doFocusSwitch = useCallback(async (): Promise<void> => {
+    const screenshareTracks: MediaStreamTrack[] = [];
+    for (const t of livekitRoom!.localParticipant.videoTracks.values()) {
+      if (t.track && t.source == Track.Source.ScreenShare) {
+        const newTrack = t.track.mediaStreamTrack.clone();
+        newTrack.enabled = true;
+        screenshareTracks.push(newTrack);
+      }
+    }
+
+    setSwitchingFocus(true);
+    await livekitRoom?.disconnect();
+    setIsInDoConnect(true);
+    try {
+      await connectAndPublish(
+        livekitRoom!,
+        sfuConfig!,
+        undefined,
+        screenshareTracks,
+      );
+    } finally {
+      setIsInDoConnect(false);
+    }
+  }, [livekitRoom, sfuConfig]);
+
   const currentSFUConfig = useRef(Object.assign({}, sfuConfig));
 
   // If we are transitioning from a valid config to another valid one, we need
@@ -140,21 +206,7 @@
       logger.info(
         `SFU config changed! URL was ${currentSFUConfig.current?.url} now ${sfuConfig?.url}`,
       );
-      (async (): Promise<void> => {
-        setSwitchingFocus(true);
-        await livekitRoom?.disconnect();
-        setIsInDoConnect(true);
-        try {
-          await doConnect(
-            livekitRoom!,
-            sfuConfig!,
-            initialAudioEnabled,
-            initialAudioOptions,
-          );
-        } finally {
-          setIsInDoConnect(false);
-        }
-      })();
+      doFocusSwitch();
     } else if (
       !sfuConfigValid(currentSFUConfig.current) &&
       sfuConfigValid(sfuConfig)
@@ -175,7 +227,13 @@
     }
 
     currentSFUConfig.current = Object.assign({}, sfuConfig);
-  }, [sfuConfig, livekitRoom, initialAudioOptions, initialAudioEnabled]);
+  }, [
+    sfuConfig,
+    livekitRoom,
+    initialAudioOptions,
+    initialAudioEnabled,
+    doFocusSwitch,
+  ]);
 
   // Because we create audio tracks by hand, there's more to connecting than
   // just what LiveKit does in room.connect, and we should continue to return
diff --git a/src/livekit/useLiveKit.ts b/src/livekit/useLiveKit.ts
index 991a4440..50921b93 100644
--- a/src/livekit/useLiveKit.ts
+++ b/src/livekit/useLiveKit.ts
@@ -22,8 +22,7 @@ import {
   RoomOptions,
   Track,
 } from "livekit-client";
-import { useLiveKitRoom } from "@livekit/components-react";
-import { useEffect, useMemo, useRef, useState } from "react";
+import { useEffect, useMemo, useRef } from "react";
 import E2EEWorker from "livekit-client/e2ee-worker?worker";
 import { logger } from "matrix-js-sdk/src/logger";
 import { MatrixRTCSession } from "matrix-js-sdk/src/matrixrtc/MatrixRTCSession";
@@ -118,11 +117,6 @@ export function useLiveKit(
     [e2eeOptions],
   );
 
-  // useECConnectionState creates and publishes an audio track by hand. To keep
-  // this from racing with LiveKit's automatic creation of the audio track, we
-  // block audio from being enabled until the connection is finished.
-  const [blockAudio, setBlockAudio] = useState(true);
-
   // Store if audio/video are currently updating. Used to prohibit unnecessary calls
   // to setMicrophoneEnabled/setCameraEnabled
   const audioMuteUpdating = useRef(false);
@@ -137,19 +131,11 @@
   // We have to create the room manually here due to a bug inside
   // @livekit/components-react. JSON.stringify() is used in deps of a
   // useEffect() with an argument that references itself, if E2EE is enabled
-  const roomWithoutProps = useMemo(() => {
+  const room = useMemo(() => {
     const r = new Room(roomOptions);
     r.setE2EEEnabled(e2eeConfig.mode !== E2eeType.NONE);
     return r;
   }, [roomOptions, e2eeConfig]);
-  const { room } = useLiveKitRoom({
-    token: sfuConfig?.jwt,
-    serverUrl: sfuConfig?.url,
-    audio: initialMuteStates.current.audio.enabled && !blockAudio,
-    video: initialMuteStates.current.video.enabled,
-    room: roomWithoutProps,
-    connect: false,
-  });
 
   const connectionState = useECConnectionState(
     {
@@ -160,11 +146,6 @@
     sfuConfig,
   );
 
-  // Unblock audio once the connection is finished
-  useEffect(() => {
-    if (connectionState === ConnectionState.Connected) setBlockAudio(false);
-  }, [connectionState, setBlockAudio]);
-
   useEffect(() => {
     // Sync the requested mute states with LiveKit's mute states. We do it this
We do it this // way around rather than using LiveKit as the source of truth, so that the diff --git a/src/room/InCallView.tsx b/src/room/InCallView.tsx index a6cd5ffa..0511ac68 100644 --- a/src/room/InCallView.tsx +++ b/src/room/InCallView.tsx @@ -200,8 +200,9 @@ export const InCallView: FC = ({ ); const onLeavePress = useCallback(() => { + livekitRoom.disconnect(); onLeave(); - }, [onLeave]); + }, [livekitRoom, onLeave]); useEffect(() => { widget?.api.transport.send(