From 56328108ca9534e34b7c12b5b8e38282c5d5f563 Mon Sep 17 00:00:00 2001
From: Timo
Date: Wed, 14 May 2025 10:41:03 +0200
Subject: [PATCH 01/44] Add custom audio renderer to only render joined
 participants & add iOS earpiece workaround

fix left/right to match Chromium + Safari (Firefox is swapped)

earpiece as setting

Simpler code and documentation

The doc explains what this class actually does and why it is so complicated.

Signed-off-by: Timo K

use only one AudioContext, remove (non-working) standby fallback
---
 locales/en/app.json                   |   2 +
 src/livekit/MatrixAudioRenderer.tsx   | 201 ++++++++++++++++++++++++++
 src/livekit/MediaDevicesContext.tsx   |  74 +++++++++-
 src/room/InCallView.tsx               |  12 +-
 src/settings/DeveloperSettingsTab.tsx |  18 +++
 src/settings/DeviceSelection.tsx      |  30 +++-
 src/settings/SettingsModal.tsx        |   8 +-
 src/settings/settings.ts              |   8 +
 src/useAudioContext.tsx               |  28 +++-
 9 files changed, 351 insertions(+), 30 deletions(-)
 create mode 100644 src/livekit/MatrixAudioRenderer.tsx

diff --git a/locales/en/app.json b/locales/en/app.json
index 963a3f555..0b4c05992 100644
--- a/locales/en/app.json
+++ b/locales/en/app.json
@@ -61,6 +61,7 @@
     "video": "Video"
   },
   "developer_mode": {
+    "always_show_iphone_earpiece": "Show iPhone earpiece option on all platforms",
     "crypto_version": "Crypto version: {{version}}",
     "debug_tile_layout_label": "Debug tile layout",
     "device_id": "Device ID: {{id}}",
@@ -174,6 +175,7 @@
     "camera_numbered": "Camera {{n}}",
     "default": "Default",
     "default_named": "Default <2>({{name}})",
+    "earpiece": "Earpiece",
     "microphone": "Microphone",
     "microphone_numbered": "Microphone {{n}}",
     "speaker": "Speaker",
diff --git a/src/livekit/MatrixAudioRenderer.tsx b/src/livekit/MatrixAudioRenderer.tsx
new file mode 100644
index 000000000..4b8218f36
--- /dev/null
+++ b/src/livekit/MatrixAudioRenderer.tsx
@@ -0,0 +1,201 @@
+/*
+Copyright 2025 New Vector Ltd.
+
+SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
+Please see LICENSE in the repository root for full details.
+*/
+
+import { getTrackReferenceId } from "@livekit/components-core";
+import { type RemoteAudioTrack, Track } from "livekit-client";
+import { useEffect, useMemo, useRef, type ReactNode } from "react";
+import {
+  useTracks,
+  AudioTrack,
+  type AudioTrackProps,
+} from "@livekit/components-react";
+import { type CallMembership } from "matrix-js-sdk/lib/matrixrtc";
+import { logger } from "matrix-js-sdk/lib/logger";
+
+import { useEarpieceAudioConfig } from "./MediaDevicesContext";
+import { useReactiveState } from "../useReactiveState";
+
+export interface MatrixAudioRendererProps {
+  /**
+   * The list of participants to render audio for.
+   * This list needs to be composed based on the MatrixRTC members so that we do not play audio from users
+   * that are not expected to be in the RTC session.
+   */
+  members: CallMembership[];
+  /**
+   * If set to `true`, mutes all audio tracks rendered by the component.
+   * @remarks
+   * If set to `true`, the server will stop sending audio track data to the client.
+   */
+  muted?: boolean;
+}
+
+/**
+ * The `MatrixAudioRenderer` component is a drop-in solution for adding audio to your LiveKit app.
+ * It takes care of handling remote participants’ audio tracks and makes sure that microphones and screen share are audible.
+ *
+ * It also takes care of the earpiece audio configuration for iOS devices.
+ * This is done by using the WebAudio API to create a stereo pan effect that mimics the earpiece audio.
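+ *
+ * In earpiece mode only one stereo channel is used and the volume is reduced
+ * (see `useEarpieceAudioConfig` in MediaDevicesContext), since the user is
+ * expected to hold the phone to their ear.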
+ * @example
+ * ```tsx
+ * <LiveKitRoom>
+ *   <MatrixAudioRenderer members={members} />
+ * </LiveKitRoom>
+ * ```
+ * @public
+ */
+export function MatrixAudioRenderer({
+  members,
+  muted,
+}: MatrixAudioRendererProps): ReactNode {
+  const validIdentities = useMemo(
+    () =>
+      new Set(members?.map((member) => `${member.sender}:${member.deviceId}`)),
+    [members],
+  );
+
+  const loggedInvalidIdentities = useRef(new Set<string>());
+  /**
+   * Log an invalid LiveKit track identity.
+   * An invalid identity is one that does not match any of the MatrixRTC members.
+   *
+   * @param identity The identity of the track that is invalid
+   * @param validIdentities The list of valid identities
+   */
+  const logInvalid = (identity: string, validIdentities: Set<string>): void => {
+    if (loggedInvalidIdentities.current.has(identity)) return;
+    logger.warn(
+      `Audio track ${identity} has no matching matrix call member`,
+      `current members: ${Array.from(validIdentities.values())}`,
+      `track will not get rendered`,
+    );
+    loggedInvalidIdentities.current.add(identity);
+  };
+
+  const tracks = useTracks(
+    [
+      Track.Source.Microphone,
+      Track.Source.ScreenShareAudio,
+      Track.Source.Unknown,
+    ],
+    {
+      updateOnlyOn: [],
+      onlySubscribed: true,
+    },
+  ).filter((ref) => {
+    const isValid = validIdentities?.has(ref.participant.identity);
+    if (!isValid) logInvalid(ref.participant.identity, validIdentities);
+    return (
+      !ref.participant.isLocal &&
+      ref.publication.kind === Track.Kind.Audio &&
+      isValid
+    );
+  });
+
+  // This component also exists (in addition to the "only play audio for connected members"
+  // logic above) to mimic earpiece audio on iPhones.
+  // The Safari audio device enumeration does not expose an earpiece audio device.
+  // As an alternative, we use the AudioContext pan node to use only one of the stereo channels.
+
+  // This component gets additionally complicated because of a Safari bug.
+  // (see: https://bugs.webkit.org/show_bug.cgi?id=251532
+  // and the related issues: https://bugs.webkit.org/show_bug.cgi?id=237878
+  // and https://bugs.webkit.org/show_bug.cgi?id=231105)
+  //
+  // The AudioContext gets stopped if the webview gets moved into the background.
+  // Once the phone is in standby, audio playback will stop.
+  // So the pan trick only works while the phone is not in standby.
+  // If earpiece mode is not used, we do not use the AudioContext, to allow standby playback.
+  // shouldUseAudioContext is set to false if stereoPan === 0 to allow standby Bluetooth playback.
+
+  const { pan: stereoPan, volume: volumeFactor } = useEarpieceAudioConfig();
+  const shouldUseAudioContext = stereoPan !== 0;
+
+  // Initialize the potentially used audio context.
+  const audioContext = useMemo(() => new AudioContext(), []);
+  const audioNodes = useMemo(
+    () => ({
+      gain: audioContext.createGain(),
+      pan: audioContext.createStereoPanner(),
+    }),
+    [audioContext],
+  );
+
+  // Simple effects to update the gain and pan node based on the props
+  useEffect(() => {
+    audioNodes.pan.pan.value = stereoPan;
+  }, [audioNodes.pan.pan, stereoPan]);
+  useEffect(() => {
+    // *4 to balance the transition from audio context to normal audio playback.
+    // probably needed due to gain behaving differently than el.volume
+    audioNodes.gain.gain.value = volumeFactor;
+  }, [audioNodes.gain.gain, volumeFactor]);
+
+  return (
+    // We add all audio elements into one <div> for the browser developer tool
+    // experience/tidiness.
+    <div>
+      {tracks.map((trackRef) => (
+        <AudioTrackWithAudioNodes
+          key={getTrackReferenceId(trackRef)}
+          trackRef={trackRef}
+          muted={muted}
+          audioContext={shouldUseAudioContext ? audioContext : undefined}
+          audioNodes={audioNodes}
+        />
+      ))}
+    </div>
+  );
+}
+
+interface StereoPanAudioTrackProps {
+  muted?: boolean;
+  audioContext?: AudioContext;
+  audioNodes: {
+    gain: GainNode;
+    pan: StereoPannerNode;
+  };
+}
+
+/**
+ * This wraps `livekit.AudioTrack` to allow adding audio nodes to a track.
+ * Its main purpose is to remount the AudioTrack component when switching from
+ * AudioContext to normal audio playback.
+ * As of now the AudioTrack component does not support adding audio nodes while being mounted.
+ */
+function AudioTrackWithAudioNodes({
+  trackRef,
+  muted,
+  audioContext,
+  audioNodes,
+  ...props
+}: StereoPanAudioTrackProps &
+  AudioTrackProps &
+  React.RefAttributes<HTMLAudioElement>): ReactNode {
+  // This is used to unmount/remount the AudioTrack component.
+  // Mounting needs to happen after the audioContext is set.
+  // (adding the audio context when already mounted did not work outside strict mode)
+  const [trackReady, setTrackReady] = useReactiveState(
+    () => false,
+    [audioContext || audioNodes],
+  );
+
+  useEffect(() => {
+    if (!trackRef || trackReady) return;
+    const track = trackRef.publication.track as RemoteAudioTrack;
+    track.setAudioContext(audioContext);
+    track.setWebAudioPlugins(
+      audioContext ? [audioNodes.gain, audioNodes.pan] : [],
+    );
+    setTrackReady(true);
+  }, [audioContext, audioNodes, setTrackReady, trackReady, trackRef]);
+
+  return (
+    trackReady && <AudioTrack trackRef={trackRef} muted={muted} {...props} />
+  );
+}
diff --git a/src/livekit/MediaDevicesContext.tsx b/src/livekit/MediaDevicesContext.tsx
index c2fc63e52..3e30c04dd 100644
--- a/src/livekit/MediaDevicesContext.tsx
+++ b/src/livekit/MediaDevicesContext.tsx
@@ -27,11 +27,15 @@ import {
   audioOutput as audioOutputSetting,
   videoInput as videoInputSetting,
   type Setting,
+  alwaysShowIphoneEarpieceSetting,
 } from "../settings/settings";
 
+export const EARPIECE_CONFIG_ID = "earpiece-id";
+
 export type DeviceLabel =
   | { type: "name"; name: string }
   | { type: "number"; number: number }
+  | { type: "earpiece" }
   | { type: "default"; name: string | null };
 
 export interface MediaDevice {
   /**
    */
   available: Map<string, DeviceLabel>;
   selectedId: string | undefined;
+  /**
+   * An additional device configuration that makes us use only one channel of
+   * the output device at a reduced volume.
+   */
+  useAsEarpiece: boolean | undefined;
   /**
    * The group ID of the selected device.
    */
@@ -65,6 +74,7 @@ function useMediaDevice(
 ): MediaDevice {
   // Make sure we don't needlessly reset to a device observer without names,
   // once permissions are already given
+  const [alwaysShowIphoneEarpiece] = useSetting(alwaysShowIphoneEarpieceSetting);
   const hasRequestedPermissions = useRef(false);
   const requestPermissions = usingNames || hasRequestedPermissions.current;
   hasRequestedPermissions.current ||= usingNames;
@@ -102,27 +112,39 @@ function useMediaDevice(
     // Create a virtual default audio output for browsers that don't have one.
     // Its device ID must be the empty string because that's what setSinkId
     // recognizes.
+    // We also create this if we do not have any available devices, so that
+    // we can use the default or the earpiece.
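+    // (Safari on iOS typically does not enumerate any "audiooutput" devices
+    // at all, so without these virtual entries there would be nothing to
+    // select on an iPhone.)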
+    const showEarpiece =
+      navigator.userAgent.match("iPhone") || alwaysShowIphoneEarpiece;
     if (
       kind === "audiooutput" &&
-      available.size &&
       !available.has("") &&
-      !available.has("default")
+      !available.has("default") &&
+      (available.size || showEarpiece)
     )
       available = new Map([
         ["", { type: "default", name: availableRaw[0]?.label || null }],
         ...available,
       ]);
+    if (kind === "audiooutput" && showEarpiece)
+      // On iPhones we have to create a virtual earpiece device, because
+      // the earpiece is not available as a device ID.
+      available = new Map([
+        ...available,
+        [EARPIECE_CONFIG_ID, { type: "earpiece" }],
+      ]);
     // Note: creating virtual default input devices would be another problem
     // entirely, because requesting a media stream from deviceId "" won't
     // automatically track the default device.
     return available;
   }),
 ),
-      [kind, deviceObserver$],
+      [alwaysShowIphoneEarpiece, deviceObserver$, kind],
     ),
   );
 
-  const [preferredId, select] = useSetting(setting);
+  const [preferredId, setPreferredId] = useSetting(setting);
+  const [asEarpiece, setAsEarpiece] = useState(false);
   const selectedId = useMemo(() => {
     if (available.size) {
       // If the preferred device is available, use it. Or if every available
@@ -138,6 +160,7 @@
     }
     return undefined;
   }, [available, preferredId]);
+
   const selectedGroupId = useObservableEagerState(
     useMemo(
       () =>
@@ -151,14 +174,27 @@
     ),
   );
 
+  const select = useCallback(
+    (id: string) => {
+      if (id === EARPIECE_CONFIG_ID) {
+        setAsEarpiece(true);
+      } else {
+        setAsEarpiece(false);
+        setPreferredId(id);
+      }
+    },
+    [setPreferredId],
+  );
+
   return useMemo(
     () => ({
       available,
       selectedId,
+      useAsEarpiece: asEarpiece,
       selectedGroupId,
       select,
     }),
-    [available, selectedId, selectedGroupId, select],
+    [available, selectedId, asEarpiece, selectedGroupId, select],
   );
 }
 
@@ -167,6 +203,7 @@
 export const deviceStub: MediaDevice = {
   selectedId: undefined,
   selectedGroupId: undefined,
   select: () => {},
+  useAsEarpiece: false,
 };
 export const devicesStub: MediaDevices = {
   audioInput: deviceStub,
@@ -255,3 +292,30 @@ export const useMediaDeviceNames = (
     return context.stopUsingDeviceNames;
   }
 }, [context, enabled]);
+
+/**
+ * A convenience hook to get the audio node configuration for the earpiece.
+ * It will check the `useAsEarpiece` of the `audioOutput` device and return
+ * the appropriate pan and volume values.
+ *
+ * @returns pan and volume values for the earpiece audio node configuration.
+ */
+export const useEarpieceAudioConfig = (): {
+  pan: number;
+  volume: number;
+} => {
+  const { audioOutput } = useMediaDevices();
+  // We use only the right speaker (pan = 1) for the earpiece.
+  // This mimics the behavior of the native earpiece speaker (only the top speaker on an iPhone).
+  const pan = useMemo(
+    () => (audioOutput.useAsEarpiece ? 1 : 0),
+    [audioOutput.useAsEarpiece],
+  );
+  // We also lower the volume by a factor of 10 to optimize for the use case
+  // where a user is holding the phone to their ear.
+  const volume = useMemo(
+    () => (audioOutput.useAsEarpiece ? 0.1 : 1),
+    [audioOutput.useAsEarpiece],
+  );
+  return { pan, volume };
+};
diff --git a/src/room/InCallView.tsx b/src/room/InCallView.tsx
index 768ddfdd7..9fa155471 100644
--- a/src/room/InCallView.tsx
+++ b/src/room/InCallView.tsx
@@ -5,11 +5,7 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
 Please see LICENSE in the repository root for full details.
 */
-import {
-  RoomAudioRenderer,
-  RoomContext,
-  useLocalParticipant,
-} from "@livekit/components-react";
+import { RoomContext, useLocalParticipant } from "@livekit/components-react";
 import { Text } from "@vector-im/compound-web";
 import { ConnectionState, type Room } from "livekit-client";
 import { type MatrixClient } from "matrix-js-sdk";
@@ -107,6 +103,7 @@ import {
 import { ReactionsReader } from "../reactions/ReactionsReader";
 import { ConnectionLostError } from "../utils/errors.ts";
 import { useTypedEventEmitter } from "../useEvents.ts";
+import { MatrixAudioRenderer } from "../livekit/MatrixAudioRenderer.tsx";
 
 const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});
 
@@ -713,7 +710,10 @@ export const InCallView: FC = ({
         )
       }
-      <RoomAudioRenderer muted={muteAllAudio} />
+      <MatrixAudioRenderer
+        members={memberships}
+        muted={muteAllAudio}
+      />
       {renderContent()}
diff --git a/src/settings/DeveloperSettingsTab.tsx b/src/settings/DeveloperSettingsTab.tsx
index fdeaa704f..36df5c39d 100644
--- a/src/settings/DeveloperSettingsTab.tsx
+++ b/src/settings/DeveloperSettingsTab.tsx
@@ -18,6 +18,7 @@ import {
   useNewMembershipManager as useNewMembershipManagerSetting,
   useExperimentalToDeviceTransport as useExperimentalToDeviceTransportSetting,
   muteAllAudio as muteAllAudioSetting,
+  alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
 } from "./settings";
 import type { MatrixClient } from "matrix-js-sdk";
 import type { Room as LivekitRoom } from "livekit-client";
@@ -46,6 +47,9 @@ export const DeveloperSettingsTab: FC = ({ client, livekitRoom }) => {
     useNewMembershipManagerSetting,
   );
 
+  const [alwaysShowIphoneEarpiece, setAlwaysShowIphoneEarpiece] = useSetting(
+    alwaysShowIphoneEarpieceSetting,
+  );
   const [
     useExperimentalToDeviceTransport,
     setUseExperimentalToDeviceTransport,
@@ -192,6 +196,20 @@ export const DeveloperSettingsTab: FC = ({ client, livekitRoom }) => {
           [setMuteAllAudio],
         )}
       />
+      {" "}
+      <InputField
+        id="alwaysShowIphoneEarpiece"
+        type="checkbox"
+        label={t("developer_mode.always_show_iphone_earpiece")}
+        checked={alwaysShowIphoneEarpiece}
+        onChange={useCallback(
+          (event: ChangeEvent<HTMLInputElement>): void => {
+            setAlwaysShowIphoneEarpiece(event.target.checked);
+          },
+          [setAlwaysShowIphoneEarpiece],
+        )}
+      />{" "}
       {livekitRoom ? (
         <>
diff --git a/src/settings/DeviceSelection.tsx b/src/settings/DeviceSelection.tsx
index 0bdabbe7f..12e89f560 100644
--- a/src/settings/DeviceSelection.tsx
+++ b/src/settings/DeviceSelection.tsx
@@ -22,17 +22,20 @@ import {
 } from "@vector-im/compound-web";
 import { Trans, useTranslation } from "react-i18next";
 
-import { type MediaDevice } from "../livekit/MediaDevicesContext";
+import {
+  EARPIECE_CONFIG_ID,
+  type MediaDevice,
+} from "../livekit/MediaDevicesContext";
 import styles from "./DeviceSelection.module.css";
 
 interface Props {
-  devices: MediaDevice;
+  device: MediaDevice;
   title: string;
   numberedLabel: (number: number) => string;
 }
 
 export const DeviceSelection: FC<Props> = ({
-  devices,
+  device,
   title,
   numberedLabel,
 }) => {
   const { t } = useTranslation();
   const groupId = useId();
   const onChange = useCallback(
     (e: ChangeEvent<HTMLInputElement>) => {
-      devices.select(e.target.value);
+      device.select(e.target.value);
     },
-    [devices],
+    [device],
   );
 
-  if (devices.available.size == 0) return null;
+  // There is no need to show the menu if there is no choice that can be made.
+  if (device.available.size == 1) return null;
 
   return (
@@ -60,7 +64,7 @@ export const DeviceSelection: FC = ({
- {[...devices.available].map(([id, label]) => { + {[...device.available].map(([id, label]) => { let labelText: ReactNode; switch (label.type) { case "name": @@ -85,6 +89,16 @@ export const DeviceSelection: FC = ({ ); break; + case "earpiece": + labelText = t("settings.devices.earpiece"); + break; + } + + let isSelected = false; + if (device.useAsEarpiece) { + isSelected = id === EARPIECE_CONFIG_ID; + } else { + isSelected = id === device.selectedId; } return ( @@ -93,7 +107,7 @@ export const DeviceSelection: FC = ({ name={groupId} control={ diff --git a/src/settings/SettingsModal.tsx b/src/settings/SettingsModal.tsx index b0a4b79e0..1c97a87d5 100644 --- a/src/settings/SettingsModal.tsx +++ b/src/settings/SettingsModal.tsx @@ -98,7 +98,6 @@ export const SettingsModal: FC = ({ useMediaDeviceNames(devices, open); const [soundVolume, setSoundVolume] = useSetting(soundEffectVolumeSetting); const [soundVolumeRaw, setSoundVolumeRaw] = useState(soundVolume); - const [showDeveloperSettingsTab] = useSetting(developerMode); const { available: isRageshakeAvailable } = useSubmitRageshake(); @@ -110,17 +109,18 @@ export const SettingsModal: FC = ({ <>
t("settings.devices.microphone_numbered", { n }) } /> t("settings.devices.speaker_numbered", { n })} /> +

{t("settings.audio_tab.effect_volume_description")}

@@ -146,7 +146,7 @@ export const SettingsModal: FC = ({
             <>
               <DeviceSelection
-                devices={devices.videoInput}
+                device={devices.videoInput}
                 title={t("common.camera")}
                 numberedLabel={(n) => t("settings.devices.camera_numbered", { n })}
               />
diff --git a/src/settings/settings.ts b/src/settings/settings.ts
index f63148ef1..0c7b91914 100644
--- a/src/settings/settings.ts
+++ b/src/settings/settings.ts
@@ -44,6 +44,9 @@ export class Setting<T> {
     this._value$.next(value);
     localStorage.setItem(this.key, JSON.stringify(value));
   };
+  public readonly getValue = (): T => {
+    return this._value$.getValue();
+  };
 }
 
 /**
@@ -128,3 +131,8 @@ export const useExperimentalToDeviceTransport = new Setting(
 
 export const muteAllAudio = new Setting("mute-all-audio", false);
 export const alwaysShowSelf = new Setting("always-show-self", true);
+
+export const alwaysShowIphoneEarpiece = new Setting(
+  "always-show-iphone-earpiece",
+  false,
+);
diff --git a/src/useAudioContext.tsx b/src/useAudioContext.tsx
index da94f3879..5a689fdf8 100644
--- a/src/useAudioContext.tsx
+++ b/src/useAudioContext.tsx
@@ -12,7 +12,10 @@ import {
   soundEffectVolume as soundEffectVolumeSetting,
   useSetting,
 } from "./settings/settings";
-import { useMediaDevices } from "./livekit/MediaDevicesContext";
+import {
+  useEarpieceAudioConfig,
+  useMediaDevices,
+} from "./livekit/MediaDevicesContext";
 import { type PrefetchedSounds } from "./soundUtils";
 
 /**
@@ -28,12 +31,15 @@ async function playSound(
   ctx: AudioContext,
   buffer: AudioBuffer,
   volume: number,
+  stereoPan: number,
 ): Promise<void> {
   const gain = ctx.createGain();
   gain.gain.setValueAtTime(volume, 0);
+  const pan = ctx.createStereoPanner();
+  pan.pan.setValueAtTime(stereoPan, 0);
   const src = ctx.createBufferSource();
   src.buffer = buffer;
-  src.connect(gain).connect(ctx.destination);
+  src.connect(gain).connect(pan).connect(ctx.destination);
   const p = new Promise<void>((r) => src.addEventListener("ended", () => r()));
   src.start();
   return p;
@@ -63,8 +69,9 @@ interface UseAudioContext {
 export function useAudioContext<S extends string>(
   props: Props<S>,
 ): UseAudioContext<S> | null {
-  const [effectSoundVolume] = useSetting(soundEffectVolumeSetting);
-  const devices = useMediaDevices();
+  const [soundEffectVolume] = useSetting(soundEffectVolumeSetting);
+  const { audioOutput } = useMediaDevices();
+
   const [audioContext, setAudioContext] = useState<AudioContext>();
   const [audioBuffers, setAudioBuffers] = useState<Record<S, AudioBuffer>>();
@@ -106,23 +113,30 @@ export function useAudioContext(
     if (audioContext && "setSinkId" in audioContext) {
       // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId
       // @ts-expect-error - setSinkId doesn't exist yet in types, maybe because it's not supported everywhere.
-      audioContext.setSinkId(devices.audioOutput.selectedId).catch((ex) => {
+      audioContext.setSinkId(audioOutput.selectedId).catch((ex) => {
         logger.warn("Unable to change sink for audio context", ex);
       });
     }
-  }, [audioContext, devices]);
+  }, [audioContext, audioOutput.selectedId]);
 
+  const { pan: earpiecePan, volume: earpieceVolume } = useEarpieceAudioConfig();
   // Don't return a function until we're ready.
if (!audioContext || !audioBuffers || props.muted) { return null; } + return { playSound: async (name): Promise => { if (!audioBuffers[name]) { logger.debug(`Tried to play a sound that wasn't buffered (${name})`); return; } - return playSound(audioContext, audioBuffers[name], effectSoundVolume); + return playSound( + audioContext, + audioBuffers[name], + soundEffectVolume * earpieceVolume, + earpiecePan, + ); }, }; } From 6b8c620bbb3ae309e7f645606d531e09716beffe Mon Sep 17 00:00:00 2001 From: Timo Date: Wed, 14 May 2025 10:41:08 +0200 Subject: [PATCH 02/44] Add tests --- playwright/access.spec.ts | 2 +- src/UserMenu.tsx | 2 +- src/livekit/MatrixAudioRenderer.test.tsx | 104 ++++++++++++++ src/livekit/MediaDevicesContext.tsx | 2 +- src/room/InCallView.test.tsx | 18 +-- src/room/MuteStates.test.tsx | 1 + .../__snapshots__/InCallView.test.tsx.snap | 2 +- src/useAudioContext.test.tsx | 136 +++++++++++------- src/utils/test.ts | 23 +++ 9 files changed, 228 insertions(+), 62 deletions(-) create mode 100644 src/livekit/MatrixAudioRenderer.test.tsx diff --git a/playwright/access.spec.ts b/playwright/access.spec.ts index 14a708739..da7ec3648 100644 --- a/playwright/access.spec.ts +++ b/playwright/access.spec.ts @@ -49,7 +49,7 @@ test("Sign up a new account, then login, then logout", async ({ browser }) => { // logout await returningUserPage.getByTestId("usermenu_open").click(); - await returningUserPage.locator('[data-test-id="usermenu_logout"]').click(); + await returningUserPage.locator('[data-testid="usermenu_logout"]').click(); await expect( returningUserPage.getByRole("link", { name: "Log In" }), diff --git a/src/UserMenu.tsx b/src/UserMenu.tsx index 52cc4a5ab..e431c328f 100644 --- a/src/UserMenu.tsx +++ b/src/UserMenu.tsx @@ -119,7 +119,7 @@ export const UserMenu: FC = ({ key={key} Icon={Icon} label={label} - data-test-id={dataTestid} + data-testid={dataTestid} onSelect={() => onAction(key)} /> ))} diff --git a/src/livekit/MatrixAudioRenderer.test.tsx b/src/livekit/MatrixAudioRenderer.test.tsx new file mode 100644 index 000000000..9d91e6fad --- /dev/null +++ b/src/livekit/MatrixAudioRenderer.test.tsx @@ -0,0 +1,104 @@ +/* +Copyright 2023, 2024 New Vector Ltd. + +SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial +Please see LICENSE in the repository root for full details. 
+*/
+
+import { afterEach, beforeEach, expect, it, vi } from "vitest";
+import { render } from "@testing-library/react";
+import { type CallMembership } from "matrix-js-sdk/lib/matrixrtc";
+import {
+  getTrackReferenceId,
+  type TrackReference,
+} from "@livekit/components-core";
+import { type RemoteAudioTrack } from "livekit-client";
+import { type ReactNode } from "react";
+import { useTracks } from "@livekit/components-react";
+
+import { testAudioContext } from "../useAudioContext.test";
+import * as MediaDevicesContext from "./MediaDevicesContext";
+import { MatrixAudioRenderer } from "./MatrixAudioRenderer";
+import { mockTrack } from "../utils/test";
+
+export const TestAudioContextConstructor = vi.fn(() => testAudioContext);
+
+beforeEach(() => {
+  vi.stubGlobal("AudioContext", TestAudioContextConstructor);
+});
+
+afterEach(() => {
+  vi.unstubAllGlobals();
+  vi.clearAllMocks();
+});
+
+vi.mock("@livekit/components-react", async (importOriginal) => {
+  return {
+    ...(await importOriginal()), // this will only affect "foo" outside of the original module
+    AudioTrack: (props: { trackRef: TrackReference }): ReactNode => {
+      return (
+        <audio
+          data-testid="audio"
+          data-ref={getTrackReferenceId(props.trackRef)}
+        />
+      );
+    },
+    useTracks: vi.fn(),
+  };
+});
+
+const tracks = [mockTrack("test:123")];
+vi.mocked(useTracks).mockReturnValue(tracks);
+
+it("should render for member", () => {
+  const { container, queryAllByTestId } = render(
+    <MatrixAudioRenderer
+      members={[{ sender: "test", deviceId: "123" } as unknown as CallMembership]}
+    />,
+  );
+  expect(container).toBeTruthy();
+  expect(queryAllByTestId("audio")).toHaveLength(1);
+});
+it("should not render without member", () => {
+  const { container, queryAllByTestId } = render(
+    <MatrixAudioRenderer members={[]} />,
+  );
+  expect(container).toBeTruthy();
+  expect(queryAllByTestId("audio")).toHaveLength(0);
+});
+
+it("should not setup audioContext gain and pan if there is no need to.", () => {
+  render(
+    <MatrixAudioRenderer
+      members={[{ sender: "test", deviceId: "123" } as unknown as CallMembership]}
+    />,
+  );
+  const audioTrack = tracks[0].publication.track! as RemoteAudioTrack;
+
+  expect(audioTrack.setAudioContext).toHaveBeenCalledTimes(1);
+  expect(audioTrack.setAudioContext).toHaveBeenCalledWith(undefined);
+  expect(audioTrack.setWebAudioPlugins).toHaveBeenCalledTimes(1);
+  expect(audioTrack.setWebAudioPlugins).toHaveBeenCalledWith([]);
+
+  expect(testAudioContext.gain.gain.value).toEqual(1);
+  expect(testAudioContext.pan.pan.value).toEqual(0);
+});
+it("should setup audioContext gain and pan", () => {
+  vi.spyOn(MediaDevicesContext, "useEarpieceAudioConfig").mockReturnValue({
+    pan: 1,
+    volume: 0.1,
+  });
+  render(
+    <MatrixAudioRenderer
+      members={[{ sender: "test", deviceId: "123" } as unknown as CallMembership]}
+    />,
+  );
+
+  const audioTrack = tracks[0].publication.track! as RemoteAudioTrack;
+  expect(audioTrack.setAudioContext).toHaveBeenCalled();
+  expect(audioTrack.setWebAudioPlugins).toHaveBeenCalled();
+
+  expect(testAudioContext.gain.gain.value).toEqual(0.1);
+  expect(testAudioContext.pan.pan.value).toEqual(1);
+});
diff --git a/src/livekit/MediaDevicesContext.tsx b/src/livekit/MediaDevicesContext.tsx
index 3e30c04dd..7d82032ac 100644
--- a/src/livekit/MediaDevicesContext.tsx
+++ b/src/livekit/MediaDevicesContext.tsx
@@ -26,8 +26,8 @@ import {
   audioInput as audioInputSetting,
   audioOutput as audioOutputSetting,
   videoInput as videoInputSetting,
+  alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
   type Setting,
-  alwaysShowIphoneEarpieceSetting,
 } from "../settings/settings";
 
 export const EARPIECE_CONFIG_ID = "earpiece-id";
diff --git a/src/room/InCallView.test.tsx b/src/room/InCallView.test.tsx
index 4d02160cb..f7bed6fe5 100644
--- a/src/room/InCallView.test.tsx
+++ b/src/room/InCallView.test.tsx
@@ -21,11 +21,7 @@ import { ConnectionState, type LocalParticipant } from "livekit-client";
 import { of } from "rxjs";
 import { BrowserRouter } from "react-router-dom";
 import { TooltipProvider } from "@vector-im/compound-web";
-import {
-  RoomAudioRenderer,
-  RoomContext,
-  useLocalParticipant,
-} from "@livekit/components-react";
+import { RoomContext, useLocalParticipant } from "@livekit/components-react";
 import { RoomAndToDeviceEvents } from "matrix-js-sdk/lib/matrixrtc/RoomAndToDeviceKeyTransport";
 
 import { type MuteStates } from "./MuteStates";
@@ -48,6 +44,8 @@ import {
 } from "../settings/settings";
 import { ReactionsSenderProvider } from "../reactions/useReactionsSender";
 import { useRoomEncryptionSystem } from "../e2ee/sharedKeyManagement";
+// import { testAudioContext } from "../useAudioContext.test";
+import { MatrixAudioRenderer } from "../livekit/MatrixAudioRenderer";
 
 // vi.hoisted(() => {
 //   localStorage = {} as unknown as Storage;
 // });
@@ -65,6 +63,7 @@
 vi.mock("../tile/GridTile");
 vi.mock("../tile/SpotlightTile");
 vi.mock("@livekit/components-react");
 vi.mock("../e2ee/sharedKeyManagement");
+vi.mock("../livekit/MatrixAudioRenderer");
 vi.mock("react-use-measure", () => ({
   default: (): [() => void, object] => [(): void => {}, {}],
 }));
@@ -81,13 +80,15 @@
 const roomMembers = new Map([carol].map((p) => [p.userId, p]));
 
 const roomId = "!foo:bar";
 let useRoomEncryptionSystemMock: MockedFunction<typeof useRoomEncryptionSystem>;
+
 beforeEach(() => {
   vi.clearAllMocks();
-  // RoomAudioRenderer is tested separately.
+
+  // MatrixAudioRenderer is tested separately.
   (
-    RoomAudioRenderer as MockedFunction<typeof RoomAudioRenderer>
+    MatrixAudioRenderer as MockedFunction<typeof MatrixAudioRenderer>
   ).mockImplementation((_props) => {
-    return <div>mocked: RoomAudioRenderer</div>;
+    return <div>mocked: MatrixAudioRenderer</div>;
   });
   (
     useLocalParticipant as MockedFunction<typeof useLocalParticipant>
   ).mockImplementation(
     () =>
       ({
         microphoneTrack: undefined,
         localParticipant: localRtcMember as unknown as LocalParticipant,
       }) as unknown as ReturnType<typeof useLocalParticipant>,
   );
-
   useRoomEncryptionSystemMock =
     useRoomEncryptionSystem as typeof useRoomEncryptionSystemMock;
   useRoomEncryptionSystemMock.mockReturnValue({ kind: E2eeType.NONE });
diff --git a/src/room/MuteStates.test.tsx b/src/room/MuteStates.test.tsx
index 4a375c8f9..eb0666038 100644
--- a/src/room/MuteStates.test.tsx
+++ b/src/room/MuteStates.test.tsx
@@ -79,6 +79,7 @@ function mockDevices(available: Map<string, DeviceLabel>): MediaDevice {
     selectedId: "",
     selectedGroupId: "",
     select: (): void => {},
+    useAsEarpiece: false,
   };
 }
 
diff --git a/src/room/__snapshots__/InCallView.test.tsx.snap b/src/room/__snapshots__/InCallView.test.tsx.snap
index 427973b65..98edb8a49 100644
--- a/src/room/__snapshots__/InCallView.test.tsx.snap
+++ b/src/room/__snapshots__/InCallView.test.tsx.snap
@@ -9,7 +9,7 @@ exports[`InCallView > rendering > renders 1`] = `
     class="header filler"
   />
   <div>
-    mocked: RoomAudioRenderer
+    mocked: MatrixAudioRenderer
  </div>
{ ); }; -class MockAudioContext { - public static testContext: MockAudioContext; - - public constructor() { - MockAudioContext.testContext = this; - } - - public gain = vitest.mocked( - { - connect: () => {}, - gain: { - setValueAtTime: vitest.fn(), - }, +const gainNode = vi.mocked( + { + connect: (node: AudioNode) => node, + gain: { + setValueAtTime: vi.fn(), + value: 1, }, - true, - ); - - public setSinkId = vitest.fn().mockResolvedValue(undefined); - public decodeAudioData = vitest.fn().mockReturnValue(1); - public createBufferSource = vitest.fn().mockReturnValue( - vitest.mocked({ + }, + true, +); +const panNode = vi.mocked( + { + connect: (node: AudioNode) => node, + pan: { + setValueAtTime: vi.fn(), + value: 0, + }, + }, + true, +); +/** + * A shared audio context test instance. + * It can also be used to mock the `AudioContext` constructor in tests: + * `vi.stubGlobal("AudioContext", () => testAudioContext);` + */ +export const testAudioContext = { + gain: gainNode, + pan: panNode, + setSinkId: vi.fn().mockResolvedValue(undefined), + decodeAudioData: vi.fn().mockReturnValue(1), + createBufferSource: vi.fn().mockReturnValue( + vi.mocked({ connect: (v: unknown) => v, start: () => {}, addEventListener: (_name: string, cb: () => void) => cb(), }), - ); - public createGain = vitest.fn().mockReturnValue(this.gain); - public close = vitest.fn().mockResolvedValue(undefined); -} + ), + createGain: vi.fn().mockReturnValue(gainNode), + createStereoPanner: vi.fn().mockReturnValue(panNode), + close: vi.fn().mockResolvedValue(undefined), +}; +export const TestAudioContextConstructor = vi.fn(() => testAudioContext); + +let user: UserEvent; +beforeEach(() => { + vi.stubGlobal("AudioContext", TestAudioContextConstructor); + user = userEvent.setup(); +}); afterEach(() => { - vitest.unstubAllGlobals(); + vi.unstubAllGlobals(); + vi.clearAllMocks(); }); test("can play a single sound", async () => { - const user = userEvent.setup(); - vitest.stubGlobal("AudioContext", MockAudioContext); const { findByText } = render(); await user.click(await findByText("Valid sound")); - expect( - MockAudioContext.testContext.createBufferSource, - ).toHaveBeenCalledOnce(); + expect(testAudioContext.createBufferSource).toHaveBeenCalledOnce(); }); + test("will ignore sounds that are not registered", async () => { - const user = userEvent.setup(); - vitest.stubGlobal("AudioContext", MockAudioContext); const { findByText } = render(); await user.click(await findByText("Invalid sound")); - expect( - MockAudioContext.testContext.createBufferSource, - ).not.toHaveBeenCalled(); + expect(testAudioContext.createBufferSource).not.toHaveBeenCalled(); }); test("will use the correct device", () => { - vitest.stubGlobal("AudioContext", MockAudioContext); render( { selectedGroupId: "", available: new Map(), select: () => {}, + useAsEarpiece: false, }, videoInput: deviceStub, startUsingDeviceNames: () => {}, @@ -112,21 +125,46 @@ test("will use the correct device", () => { , ); - expect( - MockAudioContext.testContext.createBufferSource, - ).not.toHaveBeenCalled(); - expect(MockAudioContext.testContext.setSinkId).toHaveBeenCalledWith( - "chosen-device", - ); + expect(testAudioContext.createBufferSource).not.toHaveBeenCalled(); + expect(testAudioContext.setSinkId).toHaveBeenCalledWith("chosen-device"); }); test("will use the correct volume level", async () => { - const user = userEvent.setup(); - vitest.stubGlobal("AudioContext", MockAudioContext); soundEffectVolumeSetting.setValue(0.33); const { findByText } = render(); await 
user.click(await findByText("Valid sound"));
-  expect(
-    MockAudioContext.testContext.gain.gain.setValueAtTime,
-  ).toHaveBeenCalledWith(0.33, 0);
+  expect(testAudioContext.gain.gain.setValueAtTime).toHaveBeenCalledWith(
+    0.33,
+    0,
+  );
+  expect(testAudioContext.pan.pan.setValueAtTime).toHaveBeenCalledWith(0, 0);
+});
+
+test("will use the pan if earpiece is selected", async () => {
+  const { findByText } = render(
+    <MediaDevicesContext.Provider
+      value={{
+        audioInput: deviceStub,
+        audioOutput: {
+          selectedId: "",
+          selectedGroupId: "",
+          available: new Map(),
+          select: () => {},
+          useAsEarpiece: true,
+        },
+        videoInput: deviceStub,
+        startUsingDeviceNames: () => {},
+        stopUsingDeviceNames: () => {},
+      }}
+    >
+      <TestComponent />
+    </MediaDevicesContext.Provider>,
+  );
+  await user.click(await findByText("Valid sound"));
+  expect(testAudioContext.pan.pan.setValueAtTime).toHaveBeenCalledWith(1, 0);
+
+  expect(testAudioContext.gain.gain.setValueAtTime).toHaveBeenCalledWith(
+    soundEffectVolumeSetting.getValue() * 0.1,
+    0,
+  );
+});
diff --git a/src/utils/test.ts b/src/utils/test.ts
index 6e1b54578..51ed1ed2c 100644
--- a/src/utils/test.ts
+++ b/src/utils/test.ts
@@ -27,12 +27,14 @@ import {
   type RemoteParticipant,
   type RemoteTrackPublication,
   type Room as LivekitRoom,
+  Track,
 } from "livekit-client";
 import { randomUUID } from "crypto";
 import {
   type RoomAndToDeviceEvents,
   type RoomAndToDeviceEventsHandlerMap,
 } from "matrix-js-sdk/lib/matrixrtc/RoomAndToDeviceKeyTransport";
+import { type TrackReference } from "@livekit/components-core";
 
 import {
   LocalUserMediaViewModel,
@@ -309,3 +311,24 @@ export class MockRTCSession extends TypedEventEmitter<
     return this;
   }
 }
+
+export const mockTrack = (identity: string): TrackReference =>
+  ({
+    participant: {
+      identity,
+    },
+    publication: {
+      kind: Track.Kind.Audio,
+      source: "mic",
+      trackSid: "123",
+      track: {
+        attach: vi.fn(),
+        detach: vi.fn(),
+        setAudioContext: vi.fn(),
+        setWebAudioPlugins: vi.fn(),
+        setVolume: vi.fn(),
+      },
+    },
+    track: {},
+    source: {},
+  }) as unknown as TrackReference;
From 18a59dd7dbb851eaa5ac44dfd12e577092031b3e Mon Sep 17 00:00:00 2001
From: Timo
Date: Wed, 14 May 2025 17:23:42 +0200
Subject: [PATCH 03/44] use optional audio context and effect to initiate it +
 review

---
 src/livekit/MatrixAudioRenderer.test.tsx |  2 +-
 src/livekit/MatrixAudioRenderer.tsx      | 51 ++++++++++++++----------
 src/room/InCallView.test.tsx             |  1 -
 src/settings/DeviceSelection.tsx         |  2 +-
 4 files changed, 33 insertions(+), 23 deletions(-)

diff --git a/src/livekit/MatrixAudioRenderer.test.tsx b/src/livekit/MatrixAudioRenderer.test.tsx
index 9d91e6fad..637e02ed2 100644
--- a/src/livekit/MatrixAudioRenderer.test.tsx
+++ b/src/livekit/MatrixAudioRenderer.test.tsx
@@ -34,7 +34,7 @@ afterEach(() => {
 
 vi.mock("@livekit/components-react", async (importOriginal) => {
   return {
-    ...(await importOriginal()), // this will only affect "foo" outside of the original module
+    ...(await importOriginal()),
     AudioTrack: (props: { trackRef: TrackReference }): ReactNode => {
       return (
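For reference, the WebAudio routing this series sets up (the LiveKit plugins in `MatrixAudioRenderer`, and the gain/pan chain in `playSound`) boils down to the sketch below. This is illustrative only, not part of any patch: it assumes a plain `MediaStream` instead of a LiveKit track, `playAsEarpiece` is a hypothetical helper name, and the constants mirror `useEarpieceAudioConfig` (volume factor 0.1, pan 1).

```tsx
// Minimal sketch of the earpiece trick, assuming a raw MediaStream input.
const ctx = new AudioContext();

function playAsEarpiece(stream: MediaStream): void {
  const source = ctx.createMediaStreamSource(stream);
  const gain = ctx.createGain();
  const pan = ctx.createStereoPanner();
  gain.gain.value = 0.1; // lower volume: the phone is held against the ear
  pan.pan.value = 1; // right channel only, which maps to the iPhone's top (earpiece) speaker
  // source → gain → pan → speakers
  source.connect(gain).connect(pan).connect(ctx.destination);
}
```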