Merge branch 'livekit' into eslint-upgrade

Robin
2023-10-11 10:30:57 -04:00
100 changed files with 2600 additions and 9682 deletions


@@ -20,6 +20,7 @@ import {
ExternalE2EEKeyProvider,
Room,
RoomOptions,
Track,
} from "livekit-client";
import { useLiveKitRoom } from "@livekit/components-react";
import { useEffect, useMemo, useRef, useState } from "react";
@@ -100,6 +101,17 @@ export function useLiveKit(
// block audio from being enabled until the connection is finished.
const [blockAudio, setBlockAudio] = useState(true);
// Track whether audio/video mute state is currently being updated, in order
// to prohibit unnecessary calls to setMicrophoneEnabled/setCameraEnabled.
const audioMuteUpdating = useRef(false);
const videoMuteUpdating = useRef(false);
// Store the current mute button state that gets passed to this hook via props.
// We keep it in a ref so that awaited code can read the current value.
const buttonEnabled = useRef({
audio: initialMuteStates.current.audio.enabled,
video: initialMuteStates.current.video.enabled,
});
// We have to create the room manually here due to a bug inside
// @livekit/components-react. JSON.stringify() is used in deps of a
// useEffect() with an argument that references itself, if E2EE is enabled
@@ -136,20 +148,50 @@ export function useLiveKit(
// and setting tracks to be enabled during this time causes errors.
if (room !== undefined && connectionState === ConnectionState.Connected) {
const participant = room.localParticipant;
if (participant.isMicrophoneEnabled !== muteStates.audio.enabled) {
participant
.setMicrophoneEnabled(muteStates.audio.enabled)
.catch((e) =>
logger.error("Failed to sync audio mute state with LiveKit", e)
);
}
if (participant.isCameraEnabled !== muteStates.video.enabled) {
participant
.setCameraEnabled(muteStates.video.enabled)
.catch((e) =>
logger.error("Failed to sync video mute state with LiveKit", e)
);
}
// Always update the buttonEnabled ref so that we can read the current
// state in awaited blocks.
buttonEnabled.current = {
audio: muteStates.audio.enabled,
video: muteStates.video.enabled,
};
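// Sync the actual LiveKit microphone state with the requested button state,
// guarding against concurrent updates via the audioMuteUpdating flag.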
const syncMuteStateAudio = async (): Promise<void> => {
if (
participant.isMicrophoneEnabled !== buttonEnabled.current.audio &&
!audioMuteUpdating.current
) {
audioMuteUpdating.current = true;
try {
await participant.setMicrophoneEnabled(buttonEnabled.current.audio);
} catch (e) {
logger.error("Failed to sync audio mute state with LiveKit", e);
}
audioMuteUpdating.current = false;
// Run the check again after the change is done. Because the user
// can update the state (press the mute button) while the device is still
// enabling itself, we might need to update the mute state again right away.
// This async recursion makes sure that setCamera/MicrophoneEnabled is
// called as few times as possible.
syncMuteStateAudio();
}
};
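// Same as syncMuteStateAudio, but for the camera.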
const syncMuteStateVideo = async (): Promise<void> => {
if (
participant.isCameraEnabled !== buttonEnabled.current.video &&
!videoMuteUpdating.current
) {
videoMuteUpdating.current = true;
try {
await participant.setCameraEnabled(buttonEnabled.current.video);
} catch (e) {
logger.error("Failed to sync audio mute state with LiveKit", e);
}
videoMuteUpdating.current = false;
// See the comment on the recursive call in syncMuteStateAudio above.
syncMuteStateVideo();
}
};
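// Kick off both syncs. Errors are caught and logged inside, so the returned
// promises are not awaited here.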
syncMuteStateAudio();
syncMuteStateVideo();
}
}, [room, muteStates, connectionState]);
@@ -158,12 +200,54 @@ export function useLiveKit(
if (room !== undefined && connectionState === ConnectionState.Connected) {
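// Bring LiveKit's active device of the given kind in line with the device
// the user has selected.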
const syncDevice = (kind: MediaDeviceKind, device: MediaDevice): void => {
const id = device.selectedId;
if (id !== undefined && room.getActiveDevice(kind) !== id) {
room
.switchActiveDevice(kind, id)
.catch((e) =>
logger.error(`Failed to sync ${kind} device with LiveKit`, e)
);
// Detect if we're trying to use Chrome's default device, in which case
// we need to see if the default device has changed to a different device
// by comparing the group ID of the device we're using against the group ID
// of what the default device is *now*.
// This is special-cased for audio inputs only, because we need to dig around
// in the LocalParticipant object for the track object and there's no nice
// way to do that generically. There is usually no OS-level default video capture
// device anyway, and audio outputs work differently.
if (
id === "default" &&
kind === "audioinput" &&
room.options.audioCaptureDefaults?.deviceId === "default"
) {
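// Find the local microphone track so we can inspect the group ID of its
// underlying MediaStreamTrack.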
const activeMicTrack = Array.from(
room.localParticipant.audioTracks.values()
).find((d) => d.source === Track.Source.Microphone)?.track;
const defaultDevice = device.available.find(
(d) => d.deviceId === "default"
);
if (
defaultDevice &&
activeMicTrack &&
// only restart if the stream is still running: LiveKit will detect
// when a track stops & restart appropriately, so this is not our job.
// Plus, we need to avoid restarting again if the track is already in
// the process of being restarted.
activeMicTrack.mediaStreamTrack.readyState !== "ended" &&
defaultDevice.groupId !==
activeMicTrack.mediaStreamTrack.getSettings().groupId
) {
// It's different, so restart the track, i.e. cause LiveKit to do another
// getUserMedia() call with deviceId: default to get the *new* default device.
// Note that room.switchActiveDevice() won't work: LiveKit will ignore it because
// the deviceId hasn't changed (it was and still is "default").
room.localParticipant
.getTrack(Track.Source.Microphone)
?.audioTrack?.restartTrack();
}
} else {
if (id !== undefined && room.getActiveDevice(kind) !== id) {
room
.switchActiveDevice(kind, id)
.catch((e) =>
logger.error(`Failed to sync ${kind} device with LiveKit`, e)
);
}
}
};